From 67578a7602a5be7eb51f324086c8d49bcf8b7498 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Fri, 16 Jun 2023 11:41:18 +0200 Subject: Merging upstream version 16.2.1. Signed-off-by: Daniel Baumann --- CHANGELOG.md | 129 +- README.md | 6 +- docs/search.js | 2 +- docs/sqlglot.html | 8 +- docs/sqlglot/_version.html | 4 +- docs/sqlglot/dataframe/sql.html | 38 +- docs/sqlglot/dialects/bigquery.html | 1896 +- docs/sqlglot/dialects/clickhouse.html | 2195 ++- docs/sqlglot/dialects/databricks.html | 83 +- docs/sqlglot/dialects/dialect.html | 1985 +- docs/sqlglot/dialects/drill.html | 737 +- docs/sqlglot/dialects/duckdb.html | 1188 +- docs/sqlglot/dialects/hive.html | 1426 +- docs/sqlglot/dialects/mysql.html | 2273 ++- docs/sqlglot/dialects/oracle.html | 952 +- docs/sqlglot/dialects/postgres.html | 1460 +- docs/sqlglot/dialects/presto.html | 1359 +- docs/sqlglot/dialects/redshift.html | 1097 +- docs/sqlglot/dialects/snowflake.html | 1178 +- docs/sqlglot/dialects/spark.html | 82 +- docs/sqlglot/dialects/spark2.html | 683 +- docs/sqlglot/dialects/sqlite.html | 1100 +- docs/sqlglot/dialects/starrocks.html | 82 +- docs/sqlglot/dialects/tableau.html | 286 +- docs/sqlglot/dialects/teradata.html | 1228 +- docs/sqlglot/dialects/trino.html | 67 +- docs/sqlglot/dialects/tsql.html | 435 +- docs/sqlglot/executor/env.html | 18 +- docs/sqlglot/executor/python.html | 69 +- docs/sqlglot/expressions.html | 17396 +++++++++--------- docs/sqlglot/generator.html | 12901 ++++++------- docs/sqlglot/optimizer/annotate_types.html | 1625 +- docs/sqlglot/optimizer/canonicalize.html | 4 +- docs/sqlglot/optimizer/isolate_table_selects.html | 4 +- docs/sqlglot/optimizer/optimizer.html | 6 +- docs/sqlglot/optimizer/qualify_columns.html | 8 +- docs/sqlglot/optimizer/qualify_tables.html | 44 +- docs/sqlglot/optimizer/scope.html | 2 +- docs/sqlglot/parser.html | 18166 +++++++++---------- docs/sqlglot/schema.html | 718 +- docs/sqlglot/serde.html | 18 +- docs/sqlglot/tokens.html | 3985 ++-- sqlglot/dialects/bigquery.py | 65 +- sqlglot/dialects/clickhouse.py | 38 +- sqlglot/dialects/dialect.py | 201 +- sqlglot/dialects/drill.py | 34 +- sqlglot/dialects/duckdb.py | 35 +- sqlglot/dialects/hive.py | 40 +- sqlglot/dialects/mysql.py | 26 +- sqlglot/dialects/oracle.py | 17 +- sqlglot/dialects/postgres.py | 13 +- sqlglot/dialects/presto.py | 64 +- sqlglot/dialects/redshift.py | 14 +- sqlglot/dialects/snowflake.py | 19 +- sqlglot/dialects/spark2.py | 10 +- sqlglot/dialects/sqlite.py | 7 +- sqlglot/dialects/tableau.py | 6 +- sqlglot/dialects/teradata.py | 44 +- sqlglot/dialects/tsql.py | 34 +- sqlglot/executor/env.py | 2 +- sqlglot/executor/python.py | 2 +- sqlglot/expressions.py | 90 +- sqlglot/generator.py | 383 +- sqlglot/helper.py | 28 +- sqlglot/optimizer/annotate_types.py | 516 +- sqlglot/optimizer/canonicalize.py | 2 +- sqlglot/optimizer/eliminate_joins.py | 4 +- sqlglot/optimizer/isolate_table_selects.py | 2 +- sqlglot/optimizer/merge_subqueries.py | 9 +- sqlglot/optimizer/optimize_joins.py | 33 +- sqlglot/optimizer/optimizer.py | 2 +- sqlglot/optimizer/pushdown_predicates.py | 8 +- sqlglot/optimizer/qualify_columns.py | 6 +- sqlglot/optimizer/qualify_tables.py | 6 +- sqlglot/optimizer/scope.py | 2 +- sqlglot/parser.py | 682 +- sqlglot/planner.py | 2 +- sqlglot/schema.py | 2 - sqlglot/tokens.py | 40 +- tests/dataframe/unit/test_functions.py | 2 +- tests/dialects/test_bigquery.py | 24 +- tests/dialects/test_clickhouse.py | 14 + tests/dialects/test_dialect.py | 23 +- tests/dialects/test_duckdb.py | 18 +- tests/dialects/test_mysql.py 
| 2 + tests/dialects/test_oracle.py | 2 +- tests/dialects/test_postgres.py | 56 + tests/dialects/test_presto.py | 2 + tests/dialects/test_snowflake.py | 8 +- tests/dialects/test_teradata.py | 12 +- tests/fixtures/identity.sql | 15 + tests/fixtures/optimizer/isolate_table_selects.sql | 4 +- tests/fixtures/optimizer/optimizer.sql | 35 +- tests/fixtures/optimizer/pushdown_predicates.sql | 4 +- tests/fixtures/optimizer/qualify_columns.sql | 4 + tests/fixtures/optimizer/tpc-ds/tpc-ds.sql | 2110 ++- tests/fixtures/optimizer/tpc-h/tpc-h.sql | 122 +- tests/test_build.py | 10 + tests/test_executor.py | 24 +- tests/test_expressions.py | 30 +- tests/test_helper.py | 21 +- tests/test_optimizer.py | 17 +- tests/test_parser.py | 5 + tests/test_tokens.py | 4 +- tests/test_transpile.py | 5 + tests/tpch.py | 90 +- 106 files changed, 41936 insertions(+), 40158 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 749ac68..75473d3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,125 @@ Changelog ========= +## [v16.2.0] - 2023-06-15 +### :bug: Bug Fixes +- [`b29a421`](https://github.com/tobymao/sqlglot/commit/b29a421843bc94d88e5f67dd787ee07a675d16ab) - parsing unknown into data type build *(commit by [@tobymao](https://github.com/tobymao))* + +### :wrench: Chores +- [`3233c73`](https://github.com/tobymao/sqlglot/commit/3233c73a4acb803e31143b3afe8aece7ef80313c) - mute logger *(commit by [@tobymao](https://github.com/tobymao))* + + +## [v16.1.4] - 2023-06-15 +### :bug: Bug Fixes +- [`4a1068b`](https://github.com/tobymao/sqlglot/commit/4a1068b51fcf6c9e49ec32c29345eac189d24ef2) - **Postgres**: Set INDEX_OFFSET to 1 *(PR [#1782](https://github.com/tobymao/sqlglot/pull/1782) by [@vegarsti](https://github.com/vegarsti))* + - :arrow_lower_right: *fixes issue [#1781](undefined) opened by [@vegarsti](https://github.com/vegarsti)* +- [`f523dd6`](https://github.com/tobymao/sqlglot/commit/f523dd62f516a94cd69ecb51d864ee6aea45820a) - build uppercasing everything *(commit by [@tobymao](https://github.com/tobymao))* + + +## [v16.1.3] - 2023-06-15 +### :sparkles: New Features +- [`9660c33`](https://github.com/tobymao/sqlglot/commit/9660c331f7a7c4dac267b38ceffe16d33da69015) - add enum/set types to mysql closes [#1778](https://github.com/tobymao/sqlglot/pull/1778) *(commit by [@tobymao](https://github.com/tobymao))* + +### :bug: Bug Fixes +- [`fd0fc97`](https://github.com/tobymao/sqlglot/commit/fd0fc971393eaa9a61f48aead04eeea11d74c897) - bigquery timestamp -> timestamptz *(commit by [@tobymao](https://github.com/tobymao))* +- [`b86f7e8`](https://github.com/tobymao/sqlglot/commit/b86f7e8aced2fbb71aef8532073ede16810babe6) - dialect build *(commit by [@tobymao](https://github.com/tobymao))* + + +## [v16.1.1] - 2023-06-15 +### :bug: Bug Fixes +- [`697c8b1`](https://github.com/tobymao/sqlglot/commit/697c8b13f4983a9e00110ae88ab7d58e5ba22e06) - **bigquery**: allow SPLIT call with 1 argument *(PR [#1770](https://github.com/tobymao/sqlglot/pull/1770) by [@GeorgeSittas](https://github.com/GeorgeSittas))* +- [`0796cdc`](https://github.com/tobymao/sqlglot/commit/0796cdc924fc525e819122c323713ec1570d0357) - join using struct *(commit by [@tobymao](https://github.com/tobymao))* +- [`b13d0b9`](https://github.com/tobymao/sqlglot/commit/b13d0b9faf36808f01354ba4d161fcec827bba92) - map "RETURNING" to its token in the base Tokenizer *(PR [#1773](https://github.com/tobymao/sqlglot/pull/1773) by [@GeorgeSittas](https://github.com/GeorgeSittas))* + - :arrow_lower_right: *fixes issue [#1771](undefined) opened by 
[@LilyFoote](https://github.com/LilyFoote)* +- [`a2deee3`](https://github.com/tobymao/sqlglot/commit/a2deee38e7667f9b555edf18fd102472409a07d9) - **parser**: don't parse an alias for non-source UNNESTs *(PR [#1774](https://github.com/tobymao/sqlglot/pull/1774) by [@GeorgeSittas](https://github.com/GeorgeSittas))* +- [`0a1362b`](https://github.com/tobymao/sqlglot/commit/0a1362b8ca5e18c2bee2cc8a6ab8554ed9142a78) - bigquery regexp_extract closes [#1776](https://github.com/tobymao/sqlglot/pull/1776) *(commit by [@tobymao](https://github.com/tobymao))* +- [`f84732e`](https://github.com/tobymao/sqlglot/commit/f84732eee1b09b481bdd67fa8d20de933463f06a) - bigquery timestamp mapping *(commit by [@tobymao](https://github.com/tobymao))* + +### :recycle: Refactors +- [`1dbed85`](https://github.com/tobymao/sqlglot/commit/1dbed8595c43f4c7eef5ed835ba06e7430cdafef) - **optimizer**: make the type annotator more dry *(PR [#1777](https://github.com/tobymao/sqlglot/pull/1777) by [@GeorgeSittas](https://github.com/GeorgeSittas))* + + +## [v16.1.0] - 2023-06-13 +### :sparkles: New Features +- [`a4934cb`](https://github.com/tobymao/sqlglot/commit/a4934cb6ea2bed3cc96d4207f20496a33881b83b) - add hint builder *(PR [#1758](https://github.com/tobymao/sqlglot/pull/1758) by [@GeorgeSittas](https://github.com/GeorgeSittas))* +- [`b87fa35`](https://github.com/tobymao/sqlglot/commit/b87fa35d00c578b672842e049b7e438bf233746e) - add copy flag to replace_tables *(commit by [@tobymao](https://github.com/tobymao))* +- [`6cfc873`](https://github.com/tobymao/sqlglot/commit/6cfc8732f02f3b34883fed98558a7f34b872d57d) - **snowflake**: add support for // comments *(PR [#1765](https://github.com/tobymao/sqlglot/pull/1765) by [@GeorgeSittas](https://github.com/GeorgeSittas))* + - :arrow_lower_right: *addresses issue [#1763](undefined) opened by [@florian-ernst-alan](https://github.com/florian-ernst-alan)* + +### :bug: Bug Fixes +- [`146e66a`](https://github.com/tobymao/sqlglot/commit/146e66acb91878ec6751b96935bd94ef643bd77a) - select x.update *(commit by [@tobymao](https://github.com/tobymao))* +- [`2b46782`](https://github.com/tobymao/sqlglot/commit/2b46782a2b302a616f48c42b2ea043c215321c5f) - json_object(*) closes [#1757](https://github.com/tobymao/sqlglot/pull/1757) *(commit by [@tobymao](https://github.com/tobymao))* +- [`0264b43`](https://github.com/tobymao/sqlglot/commit/0264b4383e2f45a51d7c758758e918c8cf9dd4ed) - limit offset multi arg order *(commit by [@tobymao](https://github.com/tobymao))* +- [`4fcdb0f`](https://github.com/tobymao/sqlglot/commit/4fcdb0f003a543751a1b11cd63b0f36e719a7f3a) - **tokenizer**: improve tokenization of decimals ending in . 
*(PR [#1766](https://github.com/tobymao/sqlglot/pull/1766) by [@GeorgeSittas](https://github.com/GeorgeSittas))* + - :arrow_lower_right: *fixes issue [#1764](undefined) opened by [@florian-ernst-alan](https://github.com/florian-ernst-alan)* +- [`35d960a`](https://github.com/tobymao/sqlglot/commit/35d960adebdd2fc2d96c2e6b00b4660870409a57) - **parser**: disallow no paren functions when parsing table parts *(PR [#1767](https://github.com/tobymao/sqlglot/pull/1767) by [@GeorgeSittas](https://github.com/GeorgeSittas))* + - :arrow_lower_right: *fixes issue [#1762](undefined) opened by [@florian-ernst-alan](https://github.com/florian-ernst-alan)* +- [`5955b9e`](https://github.com/tobymao/sqlglot/commit/5955b9ece1c60c5d2bbfb247c990c29cdd093f17) - values inner alias snowflake closes [#1768](https://github.com/tobymao/sqlglot/pull/1768) *(commit by [@tobymao](https://github.com/tobymao))* +- [`0a9cecb`](https://github.com/tobymao/sqlglot/commit/0a9cecbe6391949f1a86fce28fc05aeef940fd0d) - **Postgres**: Support UNNEST *(PR [#1761](https://github.com/tobymao/sqlglot/pull/1761) by [@vegarsti](https://github.com/vegarsti))* + - :arrow_lower_right: *fixes issue [#1760](undefined) opened by [@vegarsti](https://github.com/vegarsti)* + +### :recycle: Refactors +- [`46abf16`](https://github.com/tobymao/sqlglot/commit/46abf16af88bb1f1704f959cfb30dfa86fdfe636) - simplify list comprehension in hint parser *(commit by [@GeorgeSittas](https://github.com/GeorgeSittas))* + + +## [v16.0.0] - 2023-06-12 +### :boom: BREAKING CHANGES +- due to [`e00647a`](https://github.com/tobymao/sqlglot/commit/e00647af4b5998ee2c6799dd44be268a56dfde7c) - output name for parens *(commit by [@tobymao](https://github.com/tobymao))*: + + output name for parens + +- due to [`2dd8cba`](https://github.com/tobymao/sqlglot/commit/2dd8cba03fea94b811ec6bf2c6ce0a60bc48744f) - misc. improvements in formatting, type hints, dialect class variables *(PR [#1750](https://github.com/tobymao/sqlglot/pull/1750) by [@GeorgeSittas](https://github.com/GeorgeSittas))*: + + misc. 
improvements in formatting, type hints, dialect class variables (#1750) + +- due to [`a233afa`](https://github.com/tobymao/sqlglot/commit/a233afa79a3f6ece1436f4950b04e2343346e4e8) - bigquery cast date format closes [#1753](https://github.com/tobymao/sqlglot/pull/1753) *(commit by [@tobymao](https://github.com/tobymao))*: + + bigquery cast date format closes #1753 + + +### :sparkles: New Features +- [`99c41d9`](https://github.com/tobymao/sqlglot/commit/99c41d96b2afd41432ffb919caf918f3a36f612f) - **clickhouse**: support CREATE VIEW TO syntax *(PR [#1752](https://github.com/tobymao/sqlglot/pull/1752) by [@pkit](https://github.com/pkit))* +- [`e00647a`](https://github.com/tobymao/sqlglot/commit/e00647af4b5998ee2c6799dd44be268a56dfde7c) - output name for parens *(commit by [@tobymao](https://github.com/tobymao))* + +### :bug: Bug Fixes +- [`48ad1f1`](https://github.com/tobymao/sqlglot/commit/48ad1f15a18ec1e1396e1e7c50abb746b58eaebf) - bigquery table with hyphen number *(commit by [@tobymao](https://github.com/tobymao))* +- [`68b9128`](https://github.com/tobymao/sqlglot/commit/68b9128993999cefc929ca1e7734464232bb5bf0) - index using closes [#1751](https://github.com/tobymao/sqlglot/pull/1751) *(commit by [@tobymao](https://github.com/tobymao))* +- [`55a14a3`](https://github.com/tobymao/sqlglot/commit/55a14a3df96699f32ed1dee8b12a6409aee02ddb) - selecting from table with same name as cte *(commit by [@tobymao](https://github.com/tobymao))* +- [`7000a6f`](https://github.com/tobymao/sqlglot/commit/7000a6f137aabb5d2d2417179501905f37810768) - presto offset limit order closes [#1754](https://github.com/tobymao/sqlglot/pull/1754) *(commit by [@tobymao](https://github.com/tobymao))* +- [`1553bfa`](https://github.com/tobymao/sqlglot/commit/1553bfaf0e5f5859d557b241ce792ba66729c9fe) - count with multiple args closes [#1755](https://github.com/tobymao/sqlglot/pull/1755) *(commit by [@tobymao](https://github.com/tobymao))* +- [`a233afa`](https://github.com/tobymao/sqlglot/commit/a233afa79a3f6ece1436f4950b04e2343346e4e8) - bigquery cast date format closes [#1753](https://github.com/tobymao/sqlglot/pull/1753) *(commit by [@tobymao](https://github.com/tobymao))* + +### :recycle: Refactors +- [`2dd8cba`](https://github.com/tobymao/sqlglot/commit/2dd8cba03fea94b811ec6bf2c6ce0a60bc48744f) - misc. 
improvements in formatting, type hints, dialect class variables *(PR [#1750](https://github.com/tobymao/sqlglot/pull/1750) by [@GeorgeSittas](https://github.com/GeorgeSittas))* + +### :wrench: Chores +- [`b3f9078`](https://github.com/tobymao/sqlglot/commit/b3f90784b0d85ff78d718d2d8231f75b0166fec7) - make schema get_type more lenient *(commit by [@tobymao](https://github.com/tobymao))* + + +## [v15.2.0] - 2023-06-09 +### :boom: BREAKING CHANGES +- due to [`c6a540c`](https://github.com/tobymao/sqlglot/commit/c6a540c8d8b72f49472c0b1e6891c66e42ddaeb0) - store type dump so it is not reparsed *(commit by [@tobymao](https://github.com/tobymao))*: + + store type dump so it is not reparsed + + +### :sparkles: New Features +- [`e028d98`](https://github.com/tobymao/sqlglot/commit/e028d984cc5631c66aac5f42c29200410caca47e) - **redshift,presto**: transpile FROM_BASE to STRTOL and vice versa *(PR [#1744](https://github.com/tobymao/sqlglot/pull/1744) by [@GeorgeSittas](https://github.com/GeorgeSittas))* + - :arrow_lower_right: *addresses issue [#1742](undefined) opened by [@pangyifish](https://github.com/pangyifish)* +- [`bb1f1a0`](https://github.com/tobymao/sqlglot/commit/bb1f1a035c8701c881f61c65742331cf7e667260) - **redshift,presto**: transpile DATEADD, DATEDIFF to presto *(PR [#1746](https://github.com/tobymao/sqlglot/pull/1746) by [@GeorgeSittas](https://github.com/GeorgeSittas))* + - :arrow_lower_right: *addresses issue [#1745](undefined) opened by [@pangyifish](https://github.com/pangyifish)* + +### :bug: Bug Fixes +- [`9b56fc9`](https://github.com/tobymao/sqlglot/commit/9b56fc9ca3229478ed3a7cc1f51857cee7f1ca2b) - add ts_or_ds to postgres *(commit by [@tobymao](https://github.com/tobymao))* +- [`0cc09cf`](https://github.com/tobymao/sqlglot/commit/0cc09cf9b39ae9c706c486444599e66123be3a05) - is true for presto closes [#1740](https://github.com/tobymao/sqlglot/pull/1740) *(commit by [@tobymao](https://github.com/tobymao))* +- [`6168fbf`](https://github.com/tobymao/sqlglot/commit/6168fbf450d47b06b730680c5f8383bd7460008e) - redshift len->length closes [#1741](https://github.com/tobymao/sqlglot/pull/1741) *(commit by [@tobymao](https://github.com/tobymao))* +- [`824fcb2`](https://github.com/tobymao/sqlglot/commit/824fcb2f0b481306bf0d3371facd3523bce3090d) - bigquery table with hyphen number *(commit by [@tobymao](https://github.com/tobymao))* + +### :wrench: Chores +- [`d2e46c3`](https://github.com/tobymao/sqlglot/commit/d2e46c3f7b68373a64bc909567ebdcbbd2ad4c76) - fix README example *(commit by [@GeorgeSittas](https://github.com/GeorgeSittas))* +- [`c6a540c`](https://github.com/tobymao/sqlglot/commit/c6a540c8d8b72f49472c0b1e6891c66e42ddaeb0) - store type dump so it is not reparsed *(commit by [@tobymao](https://github.com/tobymao))* + + ## [v15.1.0] - 2023-06-07 ### :boom: BREAKING CHANGES - due to [`6ad00ca`](https://github.com/tobymao/sqlglot/commit/6ad00caed965be3d69ebed8c57fea0b1b05406d4) - convert left and right closes [#1733](https://github.com/tobymao/sqlglot/pull/1733) *(commit by [@tobymao](https://github.com/tobymao))*: @@ -395,4 +514,12 @@ Changelog [v14.1.0]: https://github.com/tobymao/sqlglot/compare/show...v14.1.0 [v14.1.1]: https://github.com/tobymao/sqlglot/compare/v14.1.0...v14.1.1 [v15.0.0]: https://github.com/tobymao/sqlglot/compare/v14.1.1...v15.0.0 -[v15.1.0]: https://github.com/tobymao/sqlglot/compare/v15.0.0...v15.1.0 \ No newline at end of file +[v15.1.0]: https://github.com/tobymao/sqlglot/compare/v15.0.0...v15.1.0 +[v15.2.0]: 
https://github.com/tobymao/sqlglot/compare/v15.1.0...v15.2.0 +[v15.3.0]: https://github.com/tobymao/sqlglot/compare/v15.2.0...v15.3.0 +[v16.0.0]: https://github.com/tobymao/sqlglot/compare/v15.2.0...v16.0.0 +[v16.1.0]: https://github.com/tobymao/sqlglot/compare/v16.0.0...v16.1.0 +[v16.1.1]: https://github.com/tobymao/sqlglot/compare/v16.1.0...v16.1.1 +[v16.1.3]: https://github.com/tobymao/sqlglot/compare/v16.1.2...v16.1.3 +[v16.1.4]: https://github.com/tobymao/sqlglot/compare/v16.1.3...v16.1.4 +[v16.2.0]: https://github.com/tobymao/sqlglot/compare/v16.1.4...v16.2.0 \ No newline at end of file diff --git a/README.md b/README.md index 65d607f..0db8feb 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,8 @@ You can easily [customize](#custom-dialects) the parser, [analyze](#metadata) qu Syntax [errors](#parser-errors) are highlighted and dialect incompatibilities can warn or raise depending on configurations. However, it should be noted that SQL validation is not SQLGlot’s goal, so some syntax errors may go unnoticed. +Learn more about the SQLGlot API in the [documentation](https://sqlglot.com/). + Contributions are very welcome in SQLGlot; read the [contribution guide](https://github.com/tobymao/sqlglot/blob/main/CONTRIBUTING.md) to get started! ## Table of Contents @@ -457,7 +459,9 @@ See also: [Writing a Python SQL engine from scratch](https://github.com/tobymao/ ## Documentation -SQLGlot uses [pdoc](https://pdoc.dev/) to serve its API documentation: +SQLGlot uses [pdoc](https://pdoc.dev/) to serve its API documentation. + +A hosted version is on the [SQLGlot website](https://sqlglot.com/), or you can build locally with: ``` make docs-serve diff --git a/docs/search.js b/docs/search.js index 58c9320..a30047a 100644 --- a/docs/search.js +++ b/docs/search.js @@ -1,6 +1,6 @@ window.pdocSearch = (function(){ /** elasticlunr - http://weixsong.github.io * Copyright (C) 2017 Oliver Nightingale * Copyright (C) 2017 Wei Song * MIT Licensed */!function(){function e(e){if(null===e||"object"!=typeof e)return e;var t=e.constructor();for(var n in e)e.hasOwnProperty(n)&&(t[n]=e[n]);return t}var t=function(e){var n=new t.Index;return n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};t.version="0.9.5",lunr=t,t.utils={},t.utils.warn=function(e){return function(t){e.console&&console.warn&&console.warn(t)}}(this),t.utils.toString=function(e){return void 0===e||null===e?"":e.toString()},t.EventEmitter=function(){this.events={}},t.EventEmitter.prototype.addListener=function(){var e=Array.prototype.slice.call(arguments),t=e.pop(),n=e;if("function"!=typeof t)throw new TypeError("last argument must be a function");n.forEach(function(e){this.hasHandler(e)||(this.events[e]=[]),this.events[e].push(t)},this)},t.EventEmitter.prototype.removeListener=function(e,t){if(this.hasHandler(e)){var n=this.events[e].indexOf(t);-1!==n&&(this.events[e].splice(n,1),0==this.events[e].length&&delete this.events[e])}},t.EventEmitter.prototype.emit=function(e){if(this.hasHandler(e)){var t=Array.prototype.slice.call(arguments,1);this.events[e].forEach(function(e){e.apply(void 0,t)},this)}},t.EventEmitter.prototype.hasHandler=function(e){return e in this.events},t.tokenizer=function(e){if(!arguments.length||null===e||void 0===e)return[];if(Array.isArray(e)){var n=e.filter(function(e){return null===e||void 0===e?!1:!0});n=n.map(function(e){return t.utils.toString(e).toLowerCase()});var i=[];return n.forEach(function(e){var n=e.split(t.tokenizer.seperator);i=i.concat(n)},this),i}return 
e.toString().trim().toLowerCase().split(t.tokenizer.seperator)},t.tokenizer.defaultSeperator=/[\s\-]+/,t.tokenizer.seperator=t.tokenizer.defaultSeperator,t.tokenizer.setSeperator=function(e){null!==e&&void 0!==e&&"object"==typeof e&&(t.tokenizer.seperator=e)},t.tokenizer.resetSeperator=function(){t.tokenizer.seperator=t.tokenizer.defaultSeperator},t.tokenizer.getSeperator=function(){return t.tokenizer.seperator},t.Pipeline=function(){this._queue=[]},t.Pipeline.registeredFunctions={},t.Pipeline.registerFunction=function(e,n){n in t.Pipeline.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+n),e.label=n,t.Pipeline.registeredFunctions[n]=e},t.Pipeline.getRegisteredFunction=function(e){return e in t.Pipeline.registeredFunctions!=!0?null:t.Pipeline.registeredFunctions[e]},t.Pipeline.warnIfFunctionNotRegistered=function(e){var n=e.label&&e.label in this.registeredFunctions;n||t.utils.warn("Function is not registered with pipeline. This may cause problems when serialising the index.\n",e)},t.Pipeline.load=function(e){var n=new t.Pipeline;return e.forEach(function(e){var i=t.Pipeline.getRegisteredFunction(e);if(!i)throw new Error("Cannot load un-registered function: "+e);n.add(i)}),n},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(e){t.Pipeline.warnIfFunctionNotRegistered(e),this._queue.push(e)},this)},t.Pipeline.prototype.after=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i+1,0,n)},t.Pipeline.prototype.before=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i,0,n)},t.Pipeline.prototype.remove=function(e){var t=this._queue.indexOf(e);-1!==t&&this._queue.splice(t,1)},t.Pipeline.prototype.run=function(e){for(var t=[],n=e.length,i=this._queue.length,o=0;n>o;o++){for(var r=e[o],s=0;i>s&&(r=this._queue[s](r,o,e),void 0!==r&&null!==r);s++);void 0!==r&&null!==r&&t.push(r)}return t},t.Pipeline.prototype.reset=function(){this._queue=[]},t.Pipeline.prototype.get=function(){return this._queue},t.Pipeline.prototype.toJSON=function(){return this._queue.map(function(e){return t.Pipeline.warnIfFunctionNotRegistered(e),e.label})},t.Index=function(){this._fields=[],this._ref="id",this.pipeline=new t.Pipeline,this.documentStore=new t.DocumentStore,this.index={},this.eventEmitter=new t.EventEmitter,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},t.Index.prototype.on=function(){var e=Array.prototype.slice.call(arguments);return this.eventEmitter.addListener.apply(this.eventEmitter,e)},t.Index.prototype.off=function(e,t){return this.eventEmitter.removeListener(e,t)},t.Index.load=function(e){e.version!==t.version&&t.utils.warn("version mismatch: current "+t.version+" importing "+e.version);var n=new this;n._fields=e.fields,n._ref=e.ref,n.documentStore=t.DocumentStore.load(e.documentStore),n.pipeline=t.Pipeline.load(e.pipeline),n.index={};for(var i in e.index)n.index[i]=t.InvertedIndex.load(e.index[i]);return n},t.Index.prototype.addField=function(e){return this._fields.push(e),this.index[e]=new t.InvertedIndex,this},t.Index.prototype.setRef=function(e){return this._ref=e,this},t.Index.prototype.saveDocument=function(e){return this.documentStore=new t.DocumentStore(e),this},t.Index.prototype.addDoc=function(e,n){if(e){var n=void 
0===n?!0:n,i=e[this._ref];this.documentStore.addDoc(i,e),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));this.documentStore.addFieldLength(i,n,o.length);var r={};o.forEach(function(e){e in r?r[e]+=1:r[e]=1},this);for(var s in r){var u=r[s];u=Math.sqrt(u),this.index[n].addToken(s,{ref:i,tf:u})}},this),n&&this.eventEmitter.emit("add",e,this)}},t.Index.prototype.removeDocByRef=function(e){if(e&&this.documentStore.isDocStored()!==!1&&this.documentStore.hasDoc(e)){var t=this.documentStore.getDoc(e);this.removeDoc(t,!1)}},t.Index.prototype.removeDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.hasDoc(i)&&(this.documentStore.removeDoc(i),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));o.forEach(function(e){this.index[n].removeToken(e,i)},this)},this),n&&this.eventEmitter.emit("remove",e,this))}},t.Index.prototype.updateDoc=function(e,t){var t=void 0===t?!0:t;this.removeDocByRef(e[this._ref],!1),this.addDoc(e,!1),t&&this.eventEmitter.emit("update",e,this)},t.Index.prototype.idf=function(e,t){var n="@"+t+"/"+e;if(Object.prototype.hasOwnProperty.call(this._idfCache,n))return this._idfCache[n];var i=this.index[t].getDocFreq(e),o=1+Math.log(this.documentStore.length/(i+1));return this._idfCache[n]=o,o},t.Index.prototype.getFields=function(){return this._fields.slice()},t.Index.prototype.search=function(e,n){if(!e)return[];e="string"==typeof e?{any:e}:JSON.parse(JSON.stringify(e));var i=null;null!=n&&(i=JSON.stringify(n));for(var o=new t.Configuration(i,this.getFields()).get(),r={},s=Object.keys(e),u=0;u0&&t.push(e);for(var i in n)"docs"!==i&&"df"!==i&&this.expandToken(e+i,t,n[i]);return t},t.InvertedIndex.prototype.toJSON=function(){return{root:this.root}},t.Configuration=function(e,n){var e=e||"";if(void 0==n||null==n)throw new Error("fields should not be null");this.config={};var i;try{i=JSON.parse(e),this.buildUserConfig(i,n)}catch(o){t.utils.warn("user configuration parse failed, will use default configuration"),this.buildDefaultConfig(n)}},t.Configuration.prototype.buildDefaultConfig=function(e){this.reset(),e.forEach(function(e){this.config[e]={boost:1,bool:"OR",expand:!1}},this)},t.Configuration.prototype.buildUserConfig=function(e,n){var i="OR",o=!1;if(this.reset(),"bool"in e&&(i=e.bool||i),"expand"in e&&(o=e.expand||o),"fields"in e)for(var r in e.fields)if(n.indexOf(r)>-1){var s=e.fields[r],u=o;void 0!=s.expand&&(u=s.expand),this.config[r]={boost:s.boost||0===s.boost?s.boost:1,bool:s.bool||i,expand:u}}else t.utils.warn("field name in user configuration not found in index instance fields");else this.addAllFields2UserConfig(i,o,n)},t.Configuration.prototype.addAllFields2UserConfig=function(e,t,n){n.forEach(function(n){this.config[n]={boost:1,bool:e,expand:t}},this)},t.Configuration.prototype.get=function(){return this.config},t.Configuration.prototype.reset=function(){this.config={}},lunr.SortedSet=function(){this.length=0,this.elements=[]},lunr.SortedSet.load=function(e){var t=new this;return t.elements=e,t.length=e.length,t},lunr.SortedSet.prototype.add=function(){var e,t;for(e=0;e1;){if(r===e)return o;e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o]}return r===e?o:-1},lunr.SortedSet.prototype.locationFor=function(e){for(var t=0,n=this.elements.length,i=n-t,o=t+Math.floor(i/2),r=this.elements[o];i>1;)e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o];return r>e?o:e>r?o+1:void 0},lunr.SortedSet.prototype.intersect=function(e){for(var t=new 
lunr.SortedSet,n=0,i=0,o=this.length,r=e.length,s=this.elements,u=e.elements;;){if(n>o-1||i>r-1)break;s[n]!==u[i]?s[n]u[i]&&i++:(t.add(s[n]),n++,i++)}return t},lunr.SortedSet.prototype.clone=function(){var e=new lunr.SortedSet;return e.elements=this.toArray(),e.length=e.elements.length,e},lunr.SortedSet.prototype.union=function(e){var t,n,i;this.length>=e.length?(t=this,n=e):(t=e,n=this),i=t.clone();for(var o=0,r=n.toArray();o\"SQLGlot

\n\n

SQLGlot is a no-dependency SQL parser, transpiler, optimizer, and engine. It can be used to format SQL or translate between 19 different dialects like DuckDB, Presto, Spark, Snowflake, and BigQuery. It aims to read a wide variety of SQL inputs and output syntactically correct SQL in the targeted dialects.

\n\n

It is a very comprehensive generic SQL parser with a robust test suite. It is also quite performant, while being written purely in Python.

\n\n

You can easily customize the parser, analyze queries, traverse expression trees, and programmatically build SQL.

\n\n

Syntax errors are highlighted and dialect incompatibilities can warn or raise depending on configurations. However, it should be noted that SQL validation is not SQLGlot\u2019s goal, so some syntax errors may go unnoticed.

\n\n

Contributions are very welcome in SQLGlot; read the contribution guide to get started!

\n\n

Table of Contents

\n\n\n\n

Install

\n\n

From PyPI:

\n\n
pip3 install sqlglot\n
\n\n

Or with a local checkout:

\n\n
make install\n
\n\n

Requirements for development (optional):

\n\n
make install-dev\n
\n\n

Versioning

\n\n

Given a version number MAJOR.MINOR.PATCH, SQLGlot uses the following versioning strategy:

\n\n
    \n
  • The PATCH version is incremented when there are backwards-compatible fixes or feature additions.
  • \n
  • The MINOR version is incremented when there are backwards-incompatible fixes or feature additions.
  • \n
  • The MAJOR version is incremented when there are significant backwards-incompatible fixes or feature additions.
  • \n
\n\n

Get in Touch

\n\n

We'd love to hear from you. Join our community Slack channel!

\n\n

Examples

\n\n

Formatting and Transpiling

\n\n

Easily translate from one dialect to another. For example, date/time functions vary across dialects and can be hard to deal with:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT EPOCH_MS(1618088028295)", read="duckdb", write="hive")[0]\n
\n
\n\n
\n
'SELECT FROM_UNIXTIME(1618088028295 / 1000)'\n
\n
\n\n

SQLGlot can even translate custom time formats:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT STRFTIME(x, '%y-%-m-%S')", read="duckdb", write="hive")[0]\n
\n
\n\n
\n
"SELECT DATE_FORMAT(x, 'yy-M-ss')"\n
\n
\n\n

As another example, let's suppose that we want to read in a SQL query that contains a CTE and a cast to REAL, and then transpile it to Spark, which uses backticks for identifiers and FLOAT instead of REAL:

\n\n
\n
import sqlglot\n\nsql = """WITH baz AS (SELECT a, c FROM foo WHERE a = 1) SELECT f.a, b.b, baz.c, CAST("b"."a" AS REAL) d FROM foo f JOIN bar b ON f.a = b.a LEFT JOIN baz ON f.a = baz.a"""\nprint(sqlglot.transpile(sql, write="spark", identify=True, pretty=True)[0])\n
\n
\n\n
\n
WITH `baz` AS (\n  SELECT\n    `a`,\n    `c`\n  FROM `foo`\n  WHERE\n    `a` = 1\n)\nSELECT\n  `f`.`a`,\n  `b`.`b`,\n  `baz`.`c`,\n  CAST(`b`.`a` AS FLOAT) AS `d`\nFROM `foo` AS `f`\nJOIN `bar` AS `b`\n  ON `f`.`a` = `b`.`a`\nLEFT JOIN `baz`\n  ON `f`.`a` = `baz`.`a`\n
\n
\n\n

Comments are also preserved on a best-effort basis when transpiling SQL code:

\n\n
\n
sql = """\n/* multi\n   line\n   comment\n*/\nSELECT\n  tbl.cola /* comment 1 */ + tbl.colb /* comment 2 */,\n  CAST(x AS INT), # comment 3\n  y               -- comment 4\nFROM\n  bar /* comment 5 */,\n  tbl #          comment 6\n"""\n\nprint(sqlglot.transpile(sql, read='mysql', pretty=True)[0])\n
\n
\n\n
\n
/* multi\n   line\n   comment\n*/\nSELECT\n  tbl.cola /* comment 1 */ + tbl.colb /* comment 2 */,\n  CAST(x AS INT), /* comment 3 */\n  y /* comment 4 */\nFROM bar /* comment 5 */, tbl /*          comment 6 */\n
\n
\n\n

Metadata

\n\n

You can explore SQL with expression helpers to do things like find columns and tables:

\n\n
\n
from sqlglot import parse_one, exp\n\n# print all column references (a and b)\nfor column in parse_one("SELECT a, b + 1 AS c FROM d").find_all(exp.Column):\n    print(column.alias_or_name)\n\n# find all projections in select statements (a and c)\nfor select in parse_one("SELECT a, b + 1 AS c FROM d").find_all(exp.Select):\n    for projection in select.expressions:\n        print(projection.alias_or_name)\n\n# find all tables (x, y, z)\nfor table in parse_one("SELECT * FROM x JOIN y JOIN z").find_all(exp.Table):\n    print(table.name)\n
\n
\n\n

Parser Errors

\n\n

When the parser detects an error in the syntax, it raises a ParseError:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT foo( FROM bar")\n
\n
\n\n
sqlglot.errors.ParseError: Expecting ). Line 1, Col: 13.\n  select foo( FROM bar\n              ~~~~\n
\n\n

Structured syntax errors are accessible for programmatic use:

\n\n
\n
import sqlglot\ntry:\n    sqlglot.transpile("SELECT foo( FROM bar")\nexcept sqlglot.errors.ParseError as e:\n    print(e.errors)\n
\n
\n\n
\n
[{\n  'description': 'Expecting )',\n  'line': 1,\n  'col': 13,\n  'start_context': 'SELECT foo( ',\n  'highlight': 'FROM',\n  'end_context': ' bar'\n}]\n
\n
\n\n

Unsupported Errors

\n\n

Presto's APPROX_DISTINCT supports an accuracy argument, which is not supported in Hive:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT APPROX_DISTINCT(a, 0.1) FROM foo", read="presto", write="hive")\n
\n
\n\n
\n
APPROX_COUNT_DISTINCT does not support accuracy\n'SELECT APPROX_COUNT_DISTINCT(a) FROM foo'\n
\n
\n\n
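By default such incompatibilities only produce a warning, as shown above. As a minimal sketch (not taken from the original README, assuming the ErrorLevel enum and UnsupportedError exception in sqlglot.errors), the generator's unsupported_level option can be used to raise instead:

```python
import sqlglot
from sqlglot.errors import ErrorLevel, UnsupportedError

# Sketch: ask the generator to raise instead of warn on unsupported syntax.
# unsupported_level is forwarded to the generator via **opts.
try:
    sqlglot.transpile(
        "SELECT APPROX_DISTINCT(a, 0.1) FROM foo",
        read="presto",
        write="hive",
        unsupported_level=ErrorLevel.RAISE,
    )
except UnsupportedError as error:
    print(error)
```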

Build and Modify SQL

\n\n

SQLGlot supports incrementally building SQL expressions:

\n\n
\n
from sqlglot import select, condition\n\nwhere = condition("x=1").and_("y=1")\nselect("*").from_("y").where(where).sql()\n
\n
\n\n
\n
'SELECT * FROM y WHERE x = 1 AND y = 1'\n
\n
\n\n

You can also modify a parsed tree:

\n\n
\n
from sqlglot import parse_one\nparse_one("SELECT x FROM y").from_("z").sql()\n
\n
\n\n
\n
'SELECT x FROM y, z'\n
\n
\n\n

There is also a way to recursively transform the parsed tree by applying a mapping function to each tree node:

\n\n
\n
from sqlglot import exp, parse_one\n\nexpression_tree = parse_one("SELECT a FROM x")\n\ndef transformer(node):\n    if isinstance(node, exp.Column) and node.name == "a":\n        return parse_one("FUN(a)")\n    return node\n\ntransformed_tree = expression_tree.transform(transformer)\ntransformed_tree.sql()\n
\n
\n\n
\n
'SELECT FUN(a) FROM x'\n
\n
\n\n

SQL Optimizer

\n\n

SQLGlot can rewrite queries into an \"optimized\" form. It applies a variety of techniques to create a new canonical AST. This AST can be used to standardize queries or provide the foundation for implementing an actual engine. For example:

\n\n
\n
import sqlglot\nfrom sqlglot.optimizer import optimize\n\nprint(\n    optimize(\n        sqlglot.parse_one("""\n            SELECT A OR (B OR (C AND D))\n            FROM x\n            WHERE Z = date '2021-01-01' + INTERVAL '1' month OR 1 = 0\n        """),\n        schema={"x": {"A": "INT", "B": "INT", "C": "INT", "D": "INT", "Z": "STRING"}}\n    ).sql(pretty=True)\n)\n
\n
\n\n
\n
SELECT\n  (\n    "x"."a" <> 0 OR "x"."b" <> 0 OR "x"."c" <> 0\n  )\n  AND (\n    "x"."a" <> 0 OR "x"."b" <> 0 OR "x"."d" <> 0\n  ) AS "_col_0"\nFROM "x" AS "x"\nWHERE\n  CAST("x"."z" AS DATE) = CAST('2021-02-01' AS DATE)\n
\n
\n\n

AST Introspection

\n\n

You can see the AST version of the SQL by calling repr:

\n\n
\n
from sqlglot import parse_one\nprint(repr(parse_one("SELECT a + 1 AS z")))\n
\n
\n\n
\n
(SELECT expressions:\n  (ALIAS this:\n    (ADD this:\n      (COLUMN this:\n        (IDENTIFIER this: a, quoted: False)), expression:\n      (LITERAL this: 1, is_string: False)), alias:\n    (IDENTIFIER this: z, quoted: False)))\n
\n
\n\n

AST Diff

\n\n

SQLGlot can calculate the difference between two expressions and output changes in the form of a sequence of actions needed to transform a source expression into a target one:

\n\n
\n
from sqlglot import diff, parse_one\ndiff(parse_one("SELECT a + b, c, d"), parse_one("SELECT c, a - b, d"))\n
\n
\n\n
\n
[\n  Remove(expression=(ADD this:\n    (COLUMN this:\n      (IDENTIFIER this: a, quoted: False)), expression:\n    (COLUMN this:\n      (IDENTIFIER this: b, quoted: False)))),\n  Insert(expression=(SUB this:\n    (COLUMN this:\n      (IDENTIFIER this: a, quoted: False)), expression:\n    (COLUMN this:\n      (IDENTIFIER this: b, quoted: False)))),\n  Move(expression=(COLUMN this:\n    (IDENTIFIER this: c, quoted: False))),\n  Keep(source=(IDENTIFIER this: b, quoted: False), target=(IDENTIFIER this: b, quoted: False)),\n  ...\n]\n
\n
\n\n

See also: Semantic Diff for SQL.

\n\n

Custom Dialects

\n\n

Dialects can be added by subclassing Dialect:

\n\n
\n
from sqlglot import exp\nfrom sqlglot.dialects.dialect import Dialect\nfrom sqlglot.generator import Generator\nfrom sqlglot.tokens import Tokenizer, TokenType\n\n\nclass Custom(Dialect):\n    class Tokenizer(Tokenizer):\n        QUOTES = ["'", '"']\n        IDENTIFIERS = ["`"]\n\n        KEYWORDS = {\n            **Tokenizer.KEYWORDS,\n            "INT64": TokenType.BIGINT,\n            "FLOAT64": TokenType.DOUBLE,\n        }\n\n    class Generator(Generator):\n        TRANSFORMS = {exp.Array: lambda self, e: f"[{self.expressions(e)}]"}\n\n        TYPE_MAPPING = {\n            exp.DataType.Type.TINYINT: "INT64",\n            exp.DataType.Type.SMALLINT: "INT64",\n            exp.DataType.Type.INT: "INT64",\n            exp.DataType.Type.BIGINT: "INT64",\n            exp.DataType.Type.DECIMAL: "NUMERIC",\n            exp.DataType.Type.FLOAT: "FLOAT64",\n            exp.DataType.Type.DOUBLE: "FLOAT64",\n            exp.DataType.Type.BOOLEAN: "BOOL",\n            exp.DataType.Type.TEXT: "STRING",\n        }\n\nprint(Dialect["custom"])\n
\n
\n\n
<class '__main__.Custom'>\n
\n\n
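As a brief usage sketch (assuming the Custom class above has been defined in the same session; the outputs shown are the expected mappings, not taken from the original docs), the registered dialect can then be used for transpilation:

```python
import sqlglot

# The dialect is registered under its lowercased class name, "custom".
print(sqlglot.transpile("SELECT CAST(x AS BIGINT)", write="custom")[0])
# expected: SELECT CAST(x AS INT64)

print(sqlglot.transpile("SELECT CAST(x AS INT64)", read="custom", write="duckdb")[0])
# expected: SELECT CAST(x AS BIGINT)
```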

SQL Execution

\n\n

One can even interpret SQL queries using SQLGlot, where the tables are represented as Python dictionaries. Although the engine is not very fast (it's not supposed to be) and is in a relatively early stage of development, it can be useful for unit testing and running SQL natively across Python objects. Additionally, the foundation can be easily integrated with fast compute kernels (arrow, pandas). Below is an example showcasing the execution of a SELECT expression that involves aggregations and JOINs:

\n\n
\n
from sqlglot.executor import execute\n\ntables = {\n    "sushi": [\n        {"id": 1, "price": 1.0},\n        {"id": 2, "price": 2.0},\n        {"id": 3, "price": 3.0},\n    ],\n    "order_items": [\n        {"sushi_id": 1, "order_id": 1},\n        {"sushi_id": 1, "order_id": 1},\n        {"sushi_id": 2, "order_id": 1},\n        {"sushi_id": 3, "order_id": 2},\n    ],\n    "orders": [\n        {"id": 1, "user_id": 1},\n        {"id": 2, "user_id": 2},\n    ],\n}\n\nexecute(\n    """\n    SELECT\n      o.user_id,\n      SUM(s.price) AS price\n    FROM orders o\n    JOIN order_items i\n      ON o.id = i.order_id\n    JOIN sushi s\n      ON i.sushi_id = s.id\n    GROUP BY o.user_id\n    """,\n    tables=tables\n)\n
\n
\n\n
\n
user_id price\n      1   4.0\n      2   3.0\n
\n
\n\n

See also: Writing a Python SQL engine from scratch.

\n\n

Used By

\n\n\n\n

Documentation

\n\n

SQLGlot uses pdoc to serve its API documentation:

\n\n
make docs-serve\n
\n\n

Run Tests and Lint

\n\n
make check  # Set SKIP_INTEGRATION=1 to skip integration tests\n
\n\n

Benchmarks

\n\n

Benchmarks were run on Python 3.10.5; times are in seconds.

| Query | sqlglot        | sqlfluff        | sqltree         | sqlparse        | moz_sql_parser  | sqloxide        |
|-------|----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
| tpch  | 0.01308 (1.0)  | 1.60626 (122.7) | 0.01168 (0.893) | 0.04958 (3.791) | 0.08543 (6.531) | 0.00136 (0.104) |
| short | 0.00109 (1.0)  | 0.14134 (129.2) | 0.00099 (0.906) | 0.00342 (3.131) | 0.00652 (5.970) | 8.76E-5 (0.080) |
| long  | 0.01399 (1.0)  | 2.12632 (151.9) | 0.01126 (0.805) | 0.04410 (3.151) | 0.06671 (4.767) | 0.00107 (0.076) |
| crazy | 0.03969 (1.0)  | 24.3777 (614.1) | 0.03917 (0.987) | 11.7043 (294.8) | 1.03280 (26.02) | 0.00625 (0.157) |
\n\n

Optional Dependencies

\n\n

SQLGlot uses dateutil to simplify literal timedelta expressions. The optimizer will not simplify expressions like the following if the module cannot be found:

\n\n
\n
x + interval '1' month\n
\n
\n\n
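For illustration, a minimal sketch of the behavior described above, assuming dateutil is installed and using the simplify rule from sqlglot.optimizer.simplify (the folded result shown is the expected output):

```python
import sqlglot
from sqlglot.optimizer.simplify import simplify

# With dateutil available, literal date arithmetic is folded at optimization time.
expression = sqlglot.parse_one("CAST('2021-01-01' AS DATE) + INTERVAL '1' month")
print(simplify(expression).sql())
# expected: CAST('2021-02-01' AS DATE)
```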
\n"}, "sqlglot.pretty": {"fullname": "sqlglot.pretty", "modulename": "sqlglot", "qualname": "pretty", "kind": "variable", "doc": "

Whether to format generated SQL by default.

\n", "default_value": "False"}, "sqlglot.schema": {"fullname": "sqlglot.schema", "modulename": "sqlglot.schema", "kind": "module", "doc": "

\n"}, "sqlglot.parse": {"fullname": "sqlglot.parse", "modulename": "sqlglot", "qualname": "parse", "kind": "function", "doc": "

Parses the given SQL string into a collection of syntax trees, one per parsed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • sql: the SQL code string to parse.
  • \n
  • read: the SQL dialect to apply during parsing (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • **opts: other sqlglot.parser.Parser options.
  • \n
\n\n
Returns:
\n\n
\n

The resulting syntax tree collection.

\n
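A short usage sketch (not part of the original docstring): parse returns one syntax tree per statement in the input string:

```python
import sqlglot

expressions = sqlglot.parse("SELECT 1; SELECT 2", read="duckdb")
print(len(expressions))      # 2
print(expressions[1].sql())  # SELECT 2
```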
\n", "signature": "(\tsql: str,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parse_one": {"fullname": "sqlglot.parse_one", "modulename": "sqlglot", "qualname": "parse_one", "kind": "function", "doc": "

Parses the given SQL string and returns a syntax tree for the first parsed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • sql: the SQL code string to parse.
  • \n
  • read: the SQL dialect to apply during parsing (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • into: the SQLGlot Expression to parse into.
  • \n
  • **opts: other sqlglot.parser.Parser options.
  • \n
\n\n
Returns:
\n\n
\n

The syntax tree for the first parsed statement.

\n
\n", "signature": "(\tsql: str,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tinto: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]], NoneType] = None,\t**opts) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transpile": {"fullname": "sqlglot.transpile", "modulename": "sqlglot", "qualname": "transpile", "kind": "function", "doc": "

Parses the given SQL string in accordance with the source dialect and returns a list of SQL strings transformed\nto conform to the target dialect. Each string in the returned list represents a single transformed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • sql: the SQL code string to transpile.
  • \n
  • read: the source dialect used to parse the input string (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • write: the target dialect into which the input should be transformed (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • identity: if set to True and the target dialect is not specified, the source dialect will be used as both the source and the target dialect.
  • \n
  • error_level: the desired error level of the parser.
  • \n
  • **opts: other sqlglot.generator.Generator options.
  • \n
\n\n
Returns:
\n\n
\n

The list of transpiled SQL statements.

\n
\n", "signature": "(\tsql: str,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\twrite: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tidentity: bool = True,\terror_level: Optional[sqlglot.errors.ErrorLevel] = None,\t**opts) -> List[str]:", "funcdef": "def"}, "sqlglot.dataframe": {"fullname": "sqlglot.dataframe", "modulename": "sqlglot.dataframe", "kind": "module", "doc": "

PySpark DataFrame SQL Generator

\n\n

This is a drop-in replacement for the PySpark DataFrame API that will generate SQL instead of executing DataFrame operations directly. This, when combined with the transpiling support in SQLGlot, allows one to write PySpark DataFrame code and execute it on other engines like DuckDB, Presto, Spark, Snowflake, and BigQuery.

\n\n

Currently many of the common operations are covered and more functionality will be added over time. Please open an issue or PR with your feedback or contribution to help influence what should be prioritized next and make sure your use case is properly supported.

\n\n

How to use

\n\n

Instructions

\n\n
    \n
  • Installing SQLGlot is all that is required to generate SQL. The examples also show how to execute that SQL on a specific engine, which requires that engine's client library.
  • \n
  • Find/replace all from pyspark.sql with from sqlglot.dataframe.
  • \n
  • Prior to any spark.read.table or spark.table run sqlglot.schema.add_table('<table_name>', <column_structure>).\n
      \n
    • The column structure can be defined the following ways:\n
        \n
      • Dictionary where the keys are column names and the values are strings of the Spark SQL type names.\n
          \n
        • Ex: {'cola': 'string', 'colb': 'int'}
        • \n
      • \n
      • PySpark DataFrame StructType similar to when using createDataFrame.\n
          \n
        • Ex: StructType([StructField('cola', StringType()), StructField('colb', IntegerType())])
        • \n
      • \n
      • A string of names and types similar to what is supported in createDataFrame.\n
          \n
        • Ex: cola: STRING, colb: INT
        • \n
      • \n
      • [Not Recommended] A list of string column names without type.\n
          \n
        • Ex: ['cola', 'colb']
        • \n
        • The lack of types may limit functionality in future releases.
        • \n
      • \n
    • \n
    • See Registering Custom Schema for information on how to skip this step if the information is stored externally.
    • \n
  • \n
  • Add .sql(pretty=True) to your final DataFrame command to return a list of SQL statements to run that command.\n
      \n
    • In most cases a single SQL statement is returned. Currently, the only exception is when caching DataFrames, which isn't supported in other dialects.
    • \n
    • Spark is the default output dialect. See dialects for a full list of dialects.
    • \n
    • Ex: .sql(pretty=True, dialect='bigquery')
    • \n
  • \n
\n\n

Examples

\n\n
\n
import sqlglot\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import functions as F\n\nsqlglot.schema.add_table('employee', {\n  'employee_id': 'INT',\n  'fname': 'STRING',\n  'lname': 'STRING',\n  'age': 'INT',\n})  # Register the table structure prior to reading from the table\n\nspark = SparkSession()\n\ndf = (\n    spark\n    .table('employee')\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees")) \n)\n\nprint(df.sql(pretty=True))  # Spark will be the dialect used by default\n
\n
\n\n
SELECT\n  `employee`.`age` AS `age`,\n  COUNT(DISTINCT `employee`.`employee_id`) AS `num_employees`\nFROM `employee` AS `employee`\nGROUP BY\n  `employee`.`age`\n
\n\n

Registering Custom Schema Class

\n\n

The step of calling sqlglot.schema.add_table can be skipped if you have the column structure stored externally, for example in a file or an external metadata table. This can be done by writing a class that implements the sqlglot.schema.Schema abstract class and then assigning that class to sqlglot.schema.

\n\n
\n
import sqlglot\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import functions as F\nfrom sqlglot.schema import Schema\n\n\nclass ExternalSchema(Schema):\n  ...\n\nsqlglot.schema = ExternalSchema()\n\nspark = SparkSession()\n\ndf = (\n    spark\n    .table('employee')\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees")) \n)\n\nprint(df.sql(pretty=True))\n
\n
\n\n

Example Implementations

\n\n

Bigquery

\n\n
\n
from google.cloud import bigquery\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import types\nfrom sqlglot.dataframe.sql import functions as F\n\nclient = bigquery.Client()\n\ndata = [\n    (1, "Jack", "Shephard", 34),\n    (2, "John", "Locke", 48),\n    (3, "Kate", "Austen", 34),\n    (4, "Claire", "Littleton", 22),\n    (5, "Hugo", "Reyes", 26),\n]\nschema = types.StructType([\n    types.StructField('employee_id', types.IntegerType(), False),\n    types.StructField('fname', types.StringType(), False),\n    types.StructField('lname', types.StringType(), False),\n    types.StructField('age', types.IntegerType(), False),\n])\n\nsql_statements = (\n    SparkSession()\n    .createDataFrame(data, schema)\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees"))\n    .sql(dialect="bigquery")\n)\n\nresult = None\nfor sql in sql_statements:\n  result = client.query(sql)\n\nassert result is not None\nfor row in client.query(result):\n    print(f"Age: {row['age']}, Num Employees: {row['num_employees']}")\n
\n
\n\n

Snowflake

\n\n
\n
import os\n\nimport snowflake.connector\nfrom sqlglot.dataframe.session import SparkSession\nfrom sqlglot.dataframe import types\nfrom sqlglot.dataframe import functions as F\n\nctx = snowflake.connector.connect(\n    user=os.environ["SNOWFLAKE_USER"],\n    password=os.environ["SNOWFLAKE_PASS"],\n    account=os.environ["SNOWFLAKE_ACCOUNT"]\n)\ncs = ctx.cursor()\n\ndata = [\n    (1, "Jack", "Shephard", 34),\n    (2, "John", "Locke", 48),\n    (3, "Kate", "Austen", 34),\n    (4, "Claire", "Littleton", 22),\n    (5, "Hugo", "Reyes", 26),\n]\nschema = types.StructType([\n    types.StructField('employee_id', types.IntegerType(), False),\n    types.StructField('fname', types.StringType(), False),\n    types.StructField('lname', types.StringType(), False),\n    types.StructField('age', types.IntegerType(), False),\n])\n\nsql_statements = (\n    SparkSession()\n    .createDataFrame(data, schema)\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("lname")).alias("num_employees"))\n    .sql(dialect="snowflake")\n)\n\ntry:\n    for sql in sql_statements:\n        cs.execute(sql)\n    results = cs.fetchall()\n    for row in results:\n        print(f"Age: {row[0]}, Num Employees: {row[1]}")\nfinally:\n    cs.close()\nctx.close()\n
\n
\n\n

Spark

\n\n
\n
from pyspark.sql.session import SparkSession as PySparkSession\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import types\nfrom sqlglot.dataframe.sql import functions as F\n\ndata = [\n    (1, "Jack", "Shephard", 34),\n    (2, "John", "Locke", 48),\n    (3, "Kate", "Austen", 34),\n    (4, "Claire", "Littleton", 22),\n    (5, "Hugo", "Reyes", 26),\n]\nschema = types.StructType([\n    types.StructField('employee_id', types.IntegerType(), False),\n    types.StructField('fname', types.StringType(), False),\n    types.StructField('lname', types.StringType(), False),\n    types.StructField('age', types.IntegerType(), False),\n])\n\nsql_statements = (\n    SparkSession()\n    .createDataFrame(data, schema)\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees"))\n    .sql(dialect="spark")\n)\n\npyspark = PySparkSession.builder.master("local[*]").getOrCreate()\n\ndf = None\nfor sql in sql_statements:\n    df = pyspark.sql(sql)\n\nassert df is not None\ndf.show()\n
\n
\n\n

Unsupportable Operations

\n\n

Any operation that cannot be represented in SQL cannot be supported by this tool. An example of this would be RDD operations. However, since the DataFrame API is mostly modeled around SQL concepts, most operations can be supported.

\n"}, "sqlglot.dataframe.sql": {"fullname": "sqlglot.dataframe.sql", "modulename": "sqlglot.dataframe.sql", "kind": "module", "doc": "

\n"}, "sqlglot.dataframe.sql.SparkSession": {"fullname": "sqlglot.dataframe.sql.SparkSession", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.SparkSession.table": {"fullname": "sqlglot.dataframe.sql.SparkSession.table", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession.table", "kind": "function", "doc": "

\n", "signature": "(self, tableName: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"fullname": "sqlglot.dataframe.sql.SparkSession.createDataFrame", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession.createDataFrame", "kind": "function", "doc": "

\n", "signature": "(\tself,\tdata: Sequence[Union[Dict[str, <MagicMock id='140604724939168'>], List[<MagicMock id='140604724939168'>], Tuple]],\tschema: Optional[<MagicMock id='140604726296240'>] = None,\tsamplingRatio: Optional[float] = None,\tverifySchema: bool = False) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.SparkSession.sql": {"fullname": "sqlglot.dataframe.sql.SparkSession.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession.sql", "kind": "function", "doc": "

\n", "signature": "(self, sqlQuery: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame": {"fullname": "sqlglot.dataframe.sql.DataFrame", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrame.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrame.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.__init__", "kind": "function", "doc": "

\n", "signature": "(\tspark: <MagicMock id='140604728440864'>,\texpression: sqlglot.expressions.Select,\tbranch_id: Optional[str] = None,\tsequence_id: Optional[str] = None,\tlast_op: sqlglot.dataframe.sql.operations.Operation = <Operation.INIT: -1>,\tpending_hints: Optional[List[sqlglot.expressions.Expression]] = None,\toutput_expression_container: Optional[<MagicMock id='140604729565168'>] = None,\t**kwargs)"}, "sqlglot.dataframe.sql.DataFrame.sql": {"fullname": "sqlglot.dataframe.sql.DataFrame.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.sql", "kind": "function", "doc": "

\n", "signature": "(self, dialect='spark', optimize=True, **kwargs) -> List[str]:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.copy": {"fullname": "sqlglot.dataframe.sql.DataFrame.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.copy", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.select": {"fullname": "sqlglot.dataframe.sql.DataFrame.select", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.select", "kind": "function", "doc": "

\n", "signature": "(self, *cols, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.alias": {"fullname": "sqlglot.dataframe.sql.DataFrame.alias", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.alias", "kind": "function", "doc": "

\n", "signature": "(self, name: str, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.where": {"fullname": "sqlglot.dataframe.sql.DataFrame.where", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.where", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcolumn: Union[sqlglot.dataframe.sql.column.Column, bool],\t**kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.filter": {"fullname": "sqlglot.dataframe.sql.DataFrame.filter", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.filter", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcolumn: Union[sqlglot.dataframe.sql.column.Column, bool],\t**kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"fullname": "sqlglot.dataframe.sql.DataFrame.groupBy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.groupBy", "kind": "function", "doc": "

\n", "signature": "(self, *cols, **kwargs) -> sqlglot.dataframe.sql.group.GroupedData:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.agg": {"fullname": "sqlglot.dataframe.sql.DataFrame.agg", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.agg", "kind": "function", "doc": "

\n", "signature": "(self, *exprs, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.join": {"fullname": "sqlglot.dataframe.sql.DataFrame.join", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.join", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother_df: sqlglot.dataframe.sql.dataframe.DataFrame,\ton: Union[str, List[str], sqlglot.dataframe.sql.column.Column, List[sqlglot.dataframe.sql.column.Column]],\thow: str = 'inner',\t**kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"fullname": "sqlglot.dataframe.sql.DataFrame.orderBy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.orderBy", "kind": "function", "doc": "

This implementation lets any explicitly ordered columns take priority over whatever is provided in ascending. Spark\nhas irregular behavior here and can raise runtime errors. Users shouldn't mix the two anyway, so this\nis unlikely to come up.

\n", "signature": "(\tself,\t*cols: Union[str, sqlglot.dataframe.sql.column.Column],\tascending: Union[Any, List[Any], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.sort": {"fullname": "sqlglot.dataframe.sql.DataFrame.sort", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.sort", "kind": "function", "doc": "

This implementation lets any explicitly ordered columns take priority over whatever is provided in ascending. Spark\nhas irregular behavior here and can raise runtime errors. Users shouldn't mix the two anyway, so this\nis unlikely to come up.

\n", "signature": "(\tself,\t*cols: Union[str, sqlglot.dataframe.sql.column.Column],\tascending: Union[Any, List[Any], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.union": {"fullname": "sqlglot.dataframe.sql.DataFrame.union", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.union", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"fullname": "sqlglot.dataframe.sql.DataFrame.unionAll", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.unionAll", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"fullname": "sqlglot.dataframe.sql.DataFrame.unionByName", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.unionByName", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame,\tallowMissingColumns: bool = False):", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.intersect": {"fullname": "sqlglot.dataframe.sql.DataFrame.intersect", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.intersect", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"fullname": "sqlglot.dataframe.sql.DataFrame.intersectAll", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.intersectAll", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"fullname": "sqlglot.dataframe.sql.DataFrame.exceptAll", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.exceptAll", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.distinct": {"fullname": "sqlglot.dataframe.sql.DataFrame.distinct", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.distinct", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"fullname": "sqlglot.dataframe.sql.DataFrame.dropDuplicates", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.dropDuplicates", "kind": "function", "doc": "

\n", "signature": "(self, subset: Optional[List[str]] = None):", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.dropna": {"fullname": "sqlglot.dataframe.sql.DataFrame.dropna", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.dropna", "kind": "function", "doc": "

\n", "signature": "(\tself,\thow: str = 'any',\tthresh: Optional[int] = None,\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.fillna": {"fullname": "sqlglot.dataframe.sql.DataFrame.fillna", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.fillna", "kind": "function", "doc": "

Functionality Difference: If you provide a replacement value whose type conflicts\nwith the type of the column, PySpark will simply ignore your replacement.\nThis implementation tries to cast them to the same type in some cases, so the results won't always match.\nIt's best not to mix types: make sure the replacement value has the same type as the column.

\n\n

Possibility for improvement: use the typeof function to get the type of the column\nand check whether it matches the type of the value provided. If not, make it null.

\n", "signature": "(\tself,\tvalue: <MagicMock id='140604723599328'>,\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.replace": {"fullname": "sqlglot.dataframe.sql.DataFrame.replace", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.replace", "kind": "function", "doc": "

\n", "signature": "(\tself,\tto_replace: Union[bool, int, float, str, List, Dict],\tvalue: Union[bool, int, float, str, List, NoneType] = None,\tsubset: Union[Collection[<MagicMock id='140604723535904'>], <MagicMock id='140604723535904'>, NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"fullname": "sqlglot.dataframe.sql.DataFrame.withColumn", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.withColumn", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcolName: str,\tcol: sqlglot.dataframe.sql.column.Column) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"fullname": "sqlglot.dataframe.sql.DataFrame.withColumnRenamed", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.withColumnRenamed", "kind": "function", "doc": "

\n", "signature": "(self, existing: str, new: str):", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.drop": {"fullname": "sqlglot.dataframe.sql.DataFrame.drop", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.drop", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[str, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.limit": {"fullname": "sqlglot.dataframe.sql.DataFrame.limit", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.limit", "kind": "function", "doc": "

\n", "signature": "(self, num: int) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.hint": {"fullname": "sqlglot.dataframe.sql.DataFrame.hint", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.hint", "kind": "function", "doc": "

\n", "signature": "(\tself,\tname: str,\t*parameters: Union[str, int, NoneType]) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.repartition": {"fullname": "sqlglot.dataframe.sql.DataFrame.repartition", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.repartition", "kind": "function", "doc": "

\n", "signature": "(\tself,\tnumPartitions: Union[int, <MagicMock id='140604723822992'>],\t*cols: <MagicMock id='140604723880880'>) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"fullname": "sqlglot.dataframe.sql.DataFrame.coalesce", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.coalesce", "kind": "function", "doc": "

\n", "signature": "(self, numPartitions: int) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.cache": {"fullname": "sqlglot.dataframe.sql.DataFrame.cache", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.cache", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.persist": {"fullname": "sqlglot.dataframe.sql.DataFrame.persist", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.persist", "kind": "function", "doc": "

Storage Level Options: https://spark.apache.org/docs/3.0.0-preview/sql-ref-syntax-aux-cache-cache-table.html

\n", "signature": "(\tself,\tstorageLevel: str = 'MEMORY_AND_DISK_SER') -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData": {"fullname": "sqlglot.dataframe.sql.GroupedData", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.GroupedData.__init__": {"fullname": "sqlglot.dataframe.sql.GroupedData.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.__init__", "kind": "function", "doc": "

\n", "signature": "(\tdf: sqlglot.dataframe.sql.dataframe.DataFrame,\tgroup_by_cols: List[sqlglot.dataframe.sql.column.Column],\tlast_op: sqlglot.dataframe.sql.operations.Operation)"}, "sqlglot.dataframe.sql.GroupedData.agg": {"fullname": "sqlglot.dataframe.sql.GroupedData.agg", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.agg", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*exprs: Union[sqlglot.dataframe.sql.column.Column, Dict[str, str]]) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.count": {"fullname": "sqlglot.dataframe.sql.GroupedData.count", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.count", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.mean": {"fullname": "sqlglot.dataframe.sql.GroupedData.mean", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.mean", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.avg": {"fullname": "sqlglot.dataframe.sql.GroupedData.avg", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.avg", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.max": {"fullname": "sqlglot.dataframe.sql.GroupedData.max", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.max", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.min": {"fullname": "sqlglot.dataframe.sql.GroupedData.min", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.min", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.sum": {"fullname": "sqlglot.dataframe.sql.GroupedData.sum", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.sum", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.pivot": {"fullname": "sqlglot.dataframe.sql.GroupedData.pivot", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.pivot", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column": {"fullname": "sqlglot.dataframe.sql.Column", "modulename": "sqlglot.dataframe.sql", "qualname": "Column", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.Column.__init__": {"fullname": "sqlglot.dataframe.sql.Column.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.__init__", "kind": "function", "doc": "

\n", "signature": "(\texpression: Union[<MagicMock id='140604725847056'>, sqlglot.expressions.Expression, NoneType])"}, "sqlglot.dataframe.sql.Column.ensure_col": {"fullname": "sqlglot.dataframe.sql.Column.ensure_col", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ensure_col", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tvalue: Union[<MagicMock id='140604723943728'>, sqlglot.expressions.Expression, NoneType]):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.ensure_cols": {"fullname": "sqlglot.dataframe.sql.Column.ensure_cols", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ensure_cols", "kind": "function", "doc": "

\n", "signature": "(\tcls,\targs: List[Union[<MagicMock id='140604723933680'>, sqlglot.expressions.Expression]]) -> List[sqlglot.dataframe.sql.column.Column]:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"fullname": "sqlglot.dataframe.sql.Column.invoke_anonymous_function", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.invoke_anonymous_function", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tcolumn: Optional[<MagicMock id='140604724460464'>],\tfunc_name: str,\t*args: Optional[<MagicMock id='140604724156480'>]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"fullname": "sqlglot.dataframe.sql.Column.invoke_expression_over_column", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.invoke_expression_over_column", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tcolumn: Optional[<MagicMock id='140604724090944'>],\tcallable_expression: Callable,\t**kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.binary_op": {"fullname": "sqlglot.dataframe.sql.Column.binary_op", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.binary_op", "kind": "function", "doc": "

\n", "signature": "(\tself,\tklass: Callable,\tother: <MagicMock id='140604724048944'>,\t**kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"fullname": "sqlglot.dataframe.sql.Column.inverse_binary_op", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.inverse_binary_op", "kind": "function", "doc": "

\n", "signature": "(\tself,\tklass: Callable,\tother: <MagicMock id='140604724339344'>,\t**kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.unary_op": {"fullname": "sqlglot.dataframe.sql.Column.unary_op", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.unary_op", "kind": "function", "doc": "

\n", "signature": "(self, klass: Callable, **kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.ensure_literal": {"fullname": "sqlglot.dataframe.sql.Column.ensure_literal", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ensure_literal", "kind": "function", "doc": "

\n", "signature": "(cls, value) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.copy": {"fullname": "sqlglot.dataframe.sql.Column.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.copy", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.set_table_name": {"fullname": "sqlglot.dataframe.sql.Column.set_table_name", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.set_table_name", "kind": "function", "doc": "

\n", "signature": "(self, table_name: str, copy=False) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.sql": {"fullname": "sqlglot.dataframe.sql.Column.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.sql", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> str:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.alias": {"fullname": "sqlglot.dataframe.sql.Column.alias", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.alias", "kind": "function", "doc": "

\n", "signature": "(self, name: str) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.asc": {"fullname": "sqlglot.dataframe.sql.Column.asc", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.asc", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.desc": {"fullname": "sqlglot.dataframe.sql.Column.desc", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.desc", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"fullname": "sqlglot.dataframe.sql.Column.asc_nulls_first", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.asc_nulls_first", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"fullname": "sqlglot.dataframe.sql.Column.asc_nulls_last", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.asc_nulls_last", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"fullname": "sqlglot.dataframe.sql.Column.desc_nulls_first", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.desc_nulls_first", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"fullname": "sqlglot.dataframe.sql.Column.desc_nulls_last", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.desc_nulls_last", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.when": {"fullname": "sqlglot.dataframe.sql.Column.when", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.when", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcondition: sqlglot.dataframe.sql.column.Column,\tvalue: Any) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.otherwise": {"fullname": "sqlglot.dataframe.sql.Column.otherwise", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.otherwise", "kind": "function", "doc": "

\n", "signature": "(self, value: Any) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.isNull": {"fullname": "sqlglot.dataframe.sql.Column.isNull", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.isNull", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.isNotNull": {"fullname": "sqlglot.dataframe.sql.Column.isNotNull", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.isNotNull", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.cast": {"fullname": "sqlglot.dataframe.sql.Column.cast", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.cast", "kind": "function", "doc": "

Functionality Difference: PySpark's cast accepts an instance of the datatype class.\nSQLGlot doesn't currently replicate this class, so it only accepts a string.
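A minimal sketch of the string-based usage (hypothetical column name; the rendered SQL in the comment is indicative):

from sqlglot.dataframe.sql import functions as F

# Where PySpark would also accept e.g. types.IntegerType(), here the target
# type is passed as a string and rendered into a CAST expression.
casted = F.col("age").cast("int")
print(casted.sql())  # roughly: CAST(age AS INT)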

\n", "signature": "(self, dataType: Union[str, sqlglot.dataframe.sql.types.DataType]):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.startswith": {"fullname": "sqlglot.dataframe.sql.Column.startswith", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.startswith", "kind": "function", "doc": "

\n", "signature": "(\tself,\tvalue: Union[str, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.endswith": {"fullname": "sqlglot.dataframe.sql.Column.endswith", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.endswith", "kind": "function", "doc": "

\n", "signature": "(\tself,\tvalue: Union[str, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.rlike": {"fullname": "sqlglot.dataframe.sql.Column.rlike", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.rlike", "kind": "function", "doc": "

\n", "signature": "(self, regexp: str) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.like": {"fullname": "sqlglot.dataframe.sql.Column.like", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.like", "kind": "function", "doc": "

\n", "signature": "(self, other: str):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.ilike": {"fullname": "sqlglot.dataframe.sql.Column.ilike", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ilike", "kind": "function", "doc": "

\n", "signature": "(self, other: str):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.substr": {"fullname": "sqlglot.dataframe.sql.Column.substr", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.substr", "kind": "function", "doc": "

\n", "signature": "(\tself,\tstartPos: Union[int, sqlglot.dataframe.sql.column.Column],\tlength: Union[int, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.isin": {"fullname": "sqlglot.dataframe.sql.Column.isin", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.isin", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[<MagicMock id='140604722499344'>, Iterable[<MagicMock id='140604722499344'>]]):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.between": {"fullname": "sqlglot.dataframe.sql.Column.between", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.between", "kind": "function", "doc": "

\n", "signature": "(\tself,\tlowerBound: <MagicMock id='140604722556992'>,\tupperBound: <MagicMock id='140604722616528'>) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.over": {"fullname": "sqlglot.dataframe.sql.Column.over", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.over", "kind": "function", "doc": "

\n", "signature": "(\tself,\twindow: <MagicMock id='140604722692160'>) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.__init__", "kind": "function", "doc": "

\n", "signature": "(df: sqlglot.dataframe.sql.dataframe.DataFrame)"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.drop", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.drop", "kind": "function", "doc": "

\n", "signature": "(\tself,\thow: str = 'any',\tthresh: Optional[int] = None,\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.fill", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.fill", "kind": "function", "doc": "

\n", "signature": "(\tself,\tvalue: Union[int, bool, float, str, Dict[str, Any]],\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.replace", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.replace", "kind": "function", "doc": "

\n", "signature": "(\tself,\tto_replace: Union[bool, int, float, str, List, Dict],\tvalue: Union[bool, int, float, str, List, NoneType] = None,\tsubset: Union[str, List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window": {"fullname": "sqlglot.dataframe.sql.Window", "modulename": "sqlglot.dataframe.sql", "qualname": "Window", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.Window.partitionBy": {"fullname": "sqlglot.dataframe.sql.Window.partitionBy", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.partitionBy", "kind": "function", "doc": "

\n", "signature": "(\tcls,\t*cols: Union[<MagicMock id='140604722964432'>, List[<MagicMock id='140604722964432'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window.orderBy": {"fullname": "sqlglot.dataframe.sql.Window.orderBy", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.orderBy", "kind": "function", "doc": "

\n", "signature": "(\tcls,\t*cols: Union[<MagicMock id='140604723170512'>, List[<MagicMock id='140604723170512'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window.rowsBetween": {"fullname": "sqlglot.dataframe.sql.Window.rowsBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.rowsBetween", "kind": "function", "doc": "

\n", "signature": "(cls, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window.rangeBetween": {"fullname": "sqlglot.dataframe.sql.Window.rangeBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.rangeBetween", "kind": "function", "doc": "

\n", "signature": "(cls, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec": {"fullname": "sqlglot.dataframe.sql.WindowSpec", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"fullname": "sqlglot.dataframe.sql.WindowSpec.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression = (WINDOW ))"}, "sqlglot.dataframe.sql.WindowSpec.copy": {"fullname": "sqlglot.dataframe.sql.WindowSpec.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.copy", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.sql": {"fullname": "sqlglot.dataframe.sql.WindowSpec.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.sql", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> str:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"fullname": "sqlglot.dataframe.sql.WindowSpec.partitionBy", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.partitionBy", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[<MagicMock id='140604723060480'>, List[<MagicMock id='140604723060480'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"fullname": "sqlglot.dataframe.sql.WindowSpec.orderBy", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.orderBy", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[<MagicMock id='140604722718768'>, List[<MagicMock id='140604722718768'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"fullname": "sqlglot.dataframe.sql.WindowSpec.rowsBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.rowsBetween", "kind": "function", "doc": "

\n", "signature": "(self, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"fullname": "sqlglot.dataframe.sql.WindowSpec.rangeBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.rangeBetween", "kind": "function", "doc": "

\n", "signature": "(self, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameReader": {"fullname": "sqlglot.dataframe.sql.DataFrameReader", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameReader", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrameReader.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameReader.__init__", "kind": "function", "doc": "

\n", "signature": "(spark: sqlglot.dataframe.sql.session.SparkSession)"}, "sqlglot.dataframe.sql.DataFrameReader.table": {"fullname": "sqlglot.dataframe.sql.DataFrameReader.table", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameReader.table", "kind": "function", "doc": "

\n", "signature": "(self, tableName: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.__init__", "kind": "function", "doc": "

\n", "signature": "(\tdf: sqlglot.dataframe.sql.dataframe.DataFrame,\tspark: Optional[sqlglot.dataframe.sql.session.SparkSession] = None,\tmode: Optional[str] = None,\tby_name: bool = False)"}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.copy", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> sqlglot.dataframe.sql.readwriter.DataFrameWriter:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.sql", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> List[str]:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.mode", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.mode", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsaveMode: Optional[str]) -> sqlglot.dataframe.sql.readwriter.DataFrameWriter:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.insertInto", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.insertInto", "kind": "function", "doc": "

\n", "signature": "(\tself,\ttableName: str,\toverwrite: Optional[bool] = None) -> sqlglot.dataframe.sql.readwriter.DataFrameWriter:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.saveAsTable", "kind": "function", "doc": "

\n", "signature": "(\tself,\tname: str,\tformat: Optional[str] = None,\tmode: Optional[str] = None):", "funcdef": "def"}, "sqlglot.dialects": {"fullname": "sqlglot.dialects", "modulename": "sqlglot.dialects", "kind": "module", "doc": "

Dialects

\n\n

While there is a SQL standard, most SQL engines support a variation of that standard. This makes it difficult\nto write portable SQL code. SQLGlot bridges all the different variations, called \"dialects\", with an extensible\nSQL transpilation framework.

\n\n
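For example, here is a minimal transpilation sketch between two built-in dialects (the output shown in the comment is indicative):

import sqlglot

# Read T-SQL and emit PostgreSQL; TOP is rewritten as LIMIT.
print(sqlglot.transpile("SELECT TOP 1 x FROM y", read="tsql", write="postgres")[0])
# roughly: SELECT x FROM y LIMIT 1

\n\n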

The base sqlglot.dialects.dialect.Dialect class implements a generic dialect that aims to be as universal as possible.

\n\n

Each SQL variation has its own Dialect subclass, extending the corresponding Tokenizer, Parser and Generator\nclasses as needed.

\n\n

Implementing a custom Dialect

\n\n

Consider the following example:

\n\n
\n
from sqlglot import exp\nfrom sqlglot.dialects.dialect import Dialect\nfrom sqlglot.generator import Generator\nfrom sqlglot.tokens import Tokenizer, TokenType\n\n\nclass Custom(Dialect):\n    class Tokenizer(Tokenizer):\n        QUOTES = ["'", '"']\n        IDENTIFIERS = ["`"]\n\n        KEYWORDS = {\n            **Tokenizer.KEYWORDS,\n            "INT64": TokenType.BIGINT,\n            "FLOAT64": TokenType.DOUBLE,\n        }\n\n    class Generator(Generator):\n        TRANSFORMS = {exp.Array: lambda self, e: f"[{self.expressions(e)}]"}\n\n        TYPE_MAPPING = {\n            exp.DataType.Type.TINYINT: "INT64",\n            exp.DataType.Type.SMALLINT: "INT64",\n            exp.DataType.Type.INT: "INT64",\n            exp.DataType.Type.BIGINT: "INT64",\n            exp.DataType.Type.DECIMAL: "NUMERIC",\n            exp.DataType.Type.FLOAT: "FLOAT64",\n            exp.DataType.Type.DOUBLE: "FLOAT64",\n            exp.DataType.Type.BOOLEAN: "BOOL",\n            exp.DataType.Type.TEXT: "STRING",\n        }\n
\n
\n\n

This is a typical example of adding a new dialect implementation in SQLGlot: we specify its identifier and string\ndelimiters, as well as what tokens it uses for its types and how they're associated with SQLGlot types. Since\nthe Expression classes are common for each dialect supported in SQLGlot, we may also need to override the generation\nlogic for some expressions; this is usually done by adding new entries to the TRANSFORMS mapping.

\n\n
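As a usage sketch (assuming the Custom class above has been defined in the running session): subclassing Dialect registers the new dialect under its lowercased class name, so it can be referred to as "custom" in read/write arguments. The output comment is indicative:

import sqlglot

# INT64 is tokenized as BIGINT by the Custom tokenizer, so it transpiles to
# the corresponding type of any other dialect.
print(sqlglot.transpile("SELECT CAST(x AS INT64)", read="custom", write="duckdb")[0])
# roughly: SELECT CAST(x AS BIGINT)

\n\n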
\n"}, "sqlglot.dialects.bigquery": {"fullname": "sqlglot.dialects.bigquery", "modulename": "sqlglot.dialects.bigquery", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.bigquery.BigQuery": {"fullname": "sqlglot.dialects.bigquery.BigQuery", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Tokenizer", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Parser", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as output; a short usage sketch follows the argument list below.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the value represents the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to 'always'.
  • \n
  • normalize (bool): if set to True, all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
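A brief usage sketch, assuming these generator options are forwarded as keyword arguments by sqlglot.transpile (and Expression.sql); the output comment is indicative:

import sqlglot

# Pretty-print the output and quote all identifiers.
print(sqlglot.transpile("select col_a from tbl", write="bigquery", pretty=True, identify=True)[0])
# roughly: SELECT `col_a` FROM `tbl`, spread over indented lines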
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.array_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Array) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.commit_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.rollback_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.in_unnest_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Unnest) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.except_op", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.except_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.intersect_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.with_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.dialects.clickhouse": {"fullname": "sqlglot.dialects.clickhouse", "modulename": "sqlglot.dialects.clickhouse", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.clickhouse.ClickHouse": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Parser", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the value represents the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to 'always'.
  • \n
  • normalize (bool): if set to True, all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.cte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CTE) -> str:", "funcdef": "def"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.after_limit_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> List[str]:", "funcdef": "def"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.parameterizedagg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Anonymous) -> str:", "funcdef": "def"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.placeholder_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Placeholder) -> str:", "funcdef": "def"}, "sqlglot.dialects.databricks": {"fullname": "sqlglot.dialects.databricks", "modulename": "sqlglot.dialects.databricks", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.databricks.Databricks": {"fullname": "sqlglot.dialects.databricks.Databricks", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.spark.Spark"}, "sqlglot.dialects.databricks.Databricks.Parser": {"fullname": "sqlglot.dialects.databricks.Databricks.Parser", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.dialects.spark.Spark.Parser"}, "sqlglot.dialects.databricks.Databricks.Generator": {"fullname": "sqlglot.dialects.databricks.Databricks.Generator", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the value represents the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to 'always'.
  • \n
  • normalize (bool): if set to True, all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.spark.Spark.Generator"}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"fullname": "sqlglot.dialects.databricks.Databricks.Tokenizer", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.spark2.Spark2.Tokenizer"}, "sqlglot.dialects.dialect": {"fullname": "sqlglot.dialects.dialect", "modulename": "sqlglot.dialects.dialect", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.dialect.Dialects": {"fullname": "sqlglot.dialects.dialect.Dialects", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects", "kind": "class", "doc": "

An enumeration.

\n", "bases": "builtins.str, enum.Enum"}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"fullname": "sqlglot.dialects.dialect.Dialects.DIALECT", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DIALECT", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.DIALECT: ''>"}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"fullname": "sqlglot.dialects.dialect.Dialects.BIGQUERY", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.BIGQUERY", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.BIGQUERY: 'bigquery'>"}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"fullname": "sqlglot.dialects.dialect.Dialects.CLICKHOUSE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.CLICKHOUSE", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.CLICKHOUSE: 'clickhouse'>"}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"fullname": "sqlglot.dialects.dialect.Dialects.DUCKDB", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DUCKDB", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.DUCKDB: 'duckdb'>"}, "sqlglot.dialects.dialect.Dialects.HIVE": {"fullname": "sqlglot.dialects.dialect.Dialects.HIVE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.HIVE", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.HIVE: 'hive'>"}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"fullname": "sqlglot.dialects.dialect.Dialects.MYSQL", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.MYSQL", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.MYSQL: 'mysql'>"}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"fullname": "sqlglot.dialects.dialect.Dialects.ORACLE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.ORACLE", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.ORACLE: 'oracle'>"}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"fullname": "sqlglot.dialects.dialect.Dialects.POSTGRES", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.POSTGRES", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.POSTGRES: 'postgres'>"}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"fullname": "sqlglot.dialects.dialect.Dialects.PRESTO", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.PRESTO", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.PRESTO: 'presto'>"}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"fullname": "sqlglot.dialects.dialect.Dialects.REDSHIFT", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.REDSHIFT", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.REDSHIFT: 'redshift'>"}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"fullname": "sqlglot.dialects.dialect.Dialects.SNOWFLAKE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SNOWFLAKE", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.SNOWFLAKE: 'snowflake'>"}, "sqlglot.dialects.dialect.Dialects.SPARK": {"fullname": "sqlglot.dialects.dialect.Dialects.SPARK", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SPARK", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.SPARK: 'spark'>"}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"fullname": "sqlglot.dialects.dialect.Dialects.SPARK2", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SPARK2", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.SPARK2: 'spark2'>"}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"fullname": "sqlglot.dialects.dialect.Dialects.SQLITE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SQLITE", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.SQLITE: 'sqlite'>"}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"fullname": "sqlglot.dialects.dialect.Dialects.STARROCKS", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.STARROCKS", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.STARROCKS: 'starrocks'>"}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"fullname": "sqlglot.dialects.dialect.Dialects.TABLEAU", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TABLEAU", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.TABLEAU: 'tableau'>"}, "sqlglot.dialects.dialect.Dialects.TRINO": {"fullname": "sqlglot.dialects.dialect.Dialects.TRINO", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TRINO", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.TRINO: 'trino'>"}, "sqlglot.dialects.dialect.Dialects.TSQL": {"fullname": "sqlglot.dialects.dialect.Dialects.TSQL", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TSQL", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.TSQL: 'tsql'>"}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"fullname": "sqlglot.dialects.dialect.Dialects.DATABRICKS", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DATABRICKS", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.DATABRICKS: 'databricks'>"}, "sqlglot.dialects.dialect.Dialects.DRILL": {"fullname": "sqlglot.dialects.dialect.Dialects.DRILL", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DRILL", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.DRILL: 'drill'>"}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"fullname": "sqlglot.dialects.dialect.Dialects.TERADATA", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TERADATA", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.TERADATA: 'teradata'>"}, "sqlglot.dialects.dialect.Dialect": {"fullname": "sqlglot.dialects.dialect.Dialect", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect", "kind": "class", "doc": "

\n"}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"fullname": "sqlglot.dialects.dialect.Dialect.get_or_raise", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.get_or_raise", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType]) -> Type[sqlglot.dialects.dialect.Dialect]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.format_time": {"fullname": "sqlglot.dialects.dialect.Dialect.format_time", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.format_time", "kind": "function", "doc": "

\n", "signature": "(\tcls,\texpression: Union[str, sqlglot.expressions.Expression, NoneType]) -> Optional[sqlglot.expressions.Expression]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.parse": {"fullname": "sqlglot.dialects.dialect.Dialect.parse", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.parse", "kind": "function", "doc": "

\n", "signature": "(self, sql: str, **opts) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.parse_into": {"fullname": "sqlglot.dialects.dialect.Dialect.parse_into", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.parse_into", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression_type: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]]],\tsql: str,\t**opts) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.generate": {"fullname": "sqlglot.dialects.dialect.Dialect.generate", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.generate", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: Optional[sqlglot.expressions.Expression],\t**opts) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.transpile": {"fullname": "sqlglot.dialects.dialect.Dialect.transpile", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.transpile", "kind": "function", "doc": "

\n", "signature": "(self, sql: str, **opts) -> List[str]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.tokenize": {"fullname": "sqlglot.dialects.dialect.Dialect.tokenize", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.tokenize", "kind": "function", "doc": "

\n", "signature": "(self, sql: str) -> List[sqlglot.tokens.Token]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.parser": {"fullname": "sqlglot.dialects.dialect.Dialect.parser", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.parser", "kind": "function", "doc": "

\n", "signature": "(self, **opts) -> sqlglot.parser.Parser:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.generator": {"fullname": "sqlglot.dialects.dialect.Dialect.generator", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.generator", "kind": "function", "doc": "

\n", "signature": "(self, **opts) -> sqlglot.generator.Generator:", "funcdef": "def"}, "sqlglot.dialects.dialect.rename_func": {"fullname": "sqlglot.dialects.dialect.rename_func", "modulename": "sqlglot.dialects.dialect", "qualname": "rename_func", "kind": "function", "doc": "

\n", "signature": "(\tname: str) -> Callable[[sqlglot.generator.Generator, sqlglot.expressions.Expression], str]:", "funcdef": "def"}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"fullname": "sqlglot.dialects.dialect.approx_count_distinct_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "approx_count_distinct_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.ApproxDistinct) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.if_sql": {"fullname": "sqlglot.dialects.dialect.if_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "if_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.If) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"fullname": "sqlglot.dialects.dialect.arrow_json_extract_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "arrow_json_extract_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.JSONExtract | sqlglot.expressions.JSONBExtract) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"fullname": "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "arrow_json_extract_scalar_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.JSONExtractScalar | sqlglot.expressions.JSONBExtractScalar) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.inline_array_sql": {"fullname": "sqlglot.dialects.dialect.inline_array_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "inline_array_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Array) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_ilike_sql": {"fullname": "sqlglot.dialects.dialect.no_ilike_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_ilike_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.ILike) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"fullname": "sqlglot.dialects.dialect.no_paren_current_date_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_paren_current_date_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.CurrentDate) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"fullname": "sqlglot.dialects.dialect.no_recursive_cte_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_recursive_cte_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.With) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"fullname": "sqlglot.dialects.dialect.no_safe_divide_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_safe_divide_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.SafeDivide) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_tablesample_sql": {"fullname": "sqlglot.dialects.dialect.no_tablesample_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_tablesample_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TableSample) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_pivot_sql": {"fullname": "sqlglot.dialects.dialect.no_pivot_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_pivot_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Pivot) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_trycast_sql": {"fullname": "sqlglot.dialects.dialect.no_trycast_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_trycast_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TryCast) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_properties_sql": {"fullname": "sqlglot.dialects.dialect.no_properties_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_properties_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"fullname": "sqlglot.dialects.dialect.no_comment_column_constraint_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_comment_column_constraint_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.CommentColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.str_position_sql": {"fullname": "sqlglot.dialects.dialect.str_position_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "str_position_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.StrPosition) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.struct_extract_sql": {"fullname": "sqlglot.dialects.dialect.struct_extract_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "struct_extract_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.StructExtract) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.var_map_sql": {"fullname": "sqlglot.dialects.dialect.var_map_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "var_map_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Map | sqlglot.expressions.VarMap,\tmap_func_name: str = 'MAP') -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.format_time_lambda": {"fullname": "sqlglot.dialects.dialect.format_time_lambda", "modulename": "sqlglot.dialects.dialect", "qualname": "format_time_lambda", "kind": "function", "doc": "

Helper used for time expressions.

\n\n
Arguments:
\n\n
    \n
  • exp_class: the expression class to instantiate.
  • \n
  • dialect: target SQL dialect.
  • \n
  • default: the default format; if True, the dialect's default time format is used.
  • \n
\n\n
Returns:
\n\n
\n

A callable that can be used to return the appropriately formatted time expression.

\n
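A hedged sketch of how such a helper is typically registered in a dialect's Parser (the specific function name and expression class below are illustrative assumptions, not taken from this documentation):

    from sqlglot import exp, parser
    from sqlglot.dialects.dialect import format_time_lambda

    class MyParser(parser.Parser):
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            # Illustrative mapping: build a StrToDate node and resolve its `format`
            # argument through the "hive" time mapping; default=True falls back to
            # the dialect's default time format when no format argument is given.
            "TO_DATE": format_time_lambda(exp.StrToDate, "hive", default=True),
        }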
\n", "signature": "(\texp_class: Type[~E],\tdialect: str,\tdefault: Union[bool, str, NoneType] = None) -> Callable[[List], ~E]:", "funcdef": "def"}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"fullname": "sqlglot.dialects.dialect.create_with_partitions_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "create_with_partitions_sql", "kind": "function", "doc": "

In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the\nPARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding\ncolumns are removed from the create statement.
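An illustrative sketch of the effect described above (the output shown in the comment is the expected shape, not a verbatim capture):

    import sqlglot

    # `ds` appears in the column list on the way in; Hive-style DDL generation is
    # expected to drop it from the schema and emit it, with its type, under
    # PARTITIONED BY instead.
    ddl = "CREATE TABLE t (a INT, b STRING, ds STRING) PARTITIONED BY (ds)"
    print(sqlglot.transpile(ddl, read="spark", write="hive")[0])
    # Expected shape: CREATE TABLE t (a INT, b STRING) PARTITIONED BY (ds STRING)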

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Create) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.parse_date_delta": {"fullname": "sqlglot.dialects.dialect.parse_date_delta", "modulename": "sqlglot.dialects.dialect", "qualname": "parse_date_delta", "kind": "function", "doc": "

\n", "signature": "(\texp_class: Type[~E],\tunit_mapping: Optional[Dict[str, str]] = None) -> Callable[[List], ~E]:", "funcdef": "def"}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"fullname": "sqlglot.dialects.dialect.parse_date_delta_with_interval", "modulename": "sqlglot.dialects.dialect", "qualname": "parse_date_delta_with_interval", "kind": "function", "doc": "

\n", "signature": "(expression_class: Type[~E]) -> Callable[[List], Optional[~E]]:", "funcdef": "def"}, "sqlglot.dialects.dialect.date_trunc_to_time": {"fullname": "sqlglot.dialects.dialect.date_trunc_to_time", "modulename": "sqlglot.dialects.dialect", "qualname": "date_trunc_to_time", "kind": "function", "doc": "

\n", "signature": "(\targs: List) -> sqlglot.expressions.DateTrunc | sqlglot.expressions.TimestampTrunc:", "funcdef": "def"}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"fullname": "sqlglot.dialects.dialect.timestamptrunc_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "timestamptrunc_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TimestampTrunc) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.locate_to_strposition": {"fullname": "sqlglot.dialects.dialect.locate_to_strposition", "modulename": "sqlglot.dialects.dialect", "qualname": "locate_to_strposition", "kind": "function", "doc": "

\n", "signature": "(args: List) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"fullname": "sqlglot.dialects.dialect.strposition_to_locate_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "strposition_to_locate_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.StrPosition) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.left_to_substring_sql": {"fullname": "sqlglot.dialects.dialect.left_to_substring_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "left_to_substring_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Left) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.right_to_substring_sql": {"fullname": "sqlglot.dialects.dialect.right_to_substring_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "right_to_substring_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Left) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.timestrtotime_sql": {"fullname": "sqlglot.dialects.dialect.timestrtotime_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "timestrtotime_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TimeStrToTime) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.datestrtodate_sql": {"fullname": "sqlglot.dialects.dialect.datestrtodate_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "datestrtodate_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.DateStrToDate) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.min_or_least": {"fullname": "sqlglot.dialects.dialect.min_or_least", "modulename": "sqlglot.dialects.dialect", "qualname": "min_or_least", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Min) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.max_or_greatest": {"fullname": "sqlglot.dialects.dialect.max_or_greatest", "modulename": "sqlglot.dialects.dialect", "qualname": "max_or_greatest", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Max) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.count_if_to_sum": {"fullname": "sqlglot.dialects.dialect.count_if_to_sum", "modulename": "sqlglot.dialects.dialect", "qualname": "count_if_to_sum", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.CountIf) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.trim_sql": {"fullname": "sqlglot.dialects.dialect.trim_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "trim_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Trim) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.str_to_time_sql": {"fullname": "sqlglot.dialects.dialect.str_to_time_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "str_to_time_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Expression) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"fullname": "sqlglot.dialects.dialect.ts_or_ds_to_date_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "ts_or_ds_to_date_sql", "kind": "function", "doc": "

\n", "signature": "(dialect: str) -> Callable:", "funcdef": "def"}, "sqlglot.dialects.dialect.pivot_column_names": {"fullname": "sqlglot.dialects.dialect.pivot_column_names", "modulename": "sqlglot.dialects.dialect", "qualname": "pivot_column_names", "kind": "function", "doc": "

\n", "signature": "(\taggregations: List[sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType]) -> List[str]:", "funcdef": "def"}, "sqlglot.dialects.drill": {"fullname": "sqlglot.dialects.drill", "modulename": "sqlglot.dialects.drill", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.drill.Drill": {"fullname": "sqlglot.dialects.drill.Drill", "modulename": "sqlglot.dialects.drill", "qualname": "Drill", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.drill.Drill.Tokenizer": {"fullname": "sqlglot.dialects.drill.Drill.Tokenizer", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.drill.Drill.Parser": {"fullname": "sqlglot.dialects.drill.Drill.Parser", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
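A small sketch of how the parser settings above are commonly passed in, through the top-level parse helper (assuming extra keyword arguments are forwarded to the dialect's Parser):

    import sqlglot
    from sqlglot.errors import ErrorLevel, ParseError

    try:
        # With ErrorLevel.RAISE, accumulated parse errors are raised as a ParseError.
        sqlglot.parse("SELECT * FROM", read="drill", error_level=ErrorLevel.RAISE)
    except ParseError as e:
        print(e)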
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.drill.Drill.Generator": {"fullname": "sqlglot.dialects.drill.Drill.Generator", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to 'always'.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"fullname": "sqlglot.dialects.drill.Drill.Generator.normalize_func", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Generator.normalize_func", "kind": "function", "doc": "

\n", "signature": "(self, name: str) -> str:", "funcdef": "def"}, "sqlglot.dialects.duckdb": {"fullname": "sqlglot.dialects.duckdb", "modulename": "sqlglot.dialects.duckdb", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.duckdb.DuckDB": {"fullname": "sqlglot.dialects.duckdb.DuckDB", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Tokenizer", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Parser", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Generator", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to 'always'.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Generator.tablesample_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.TableSample,\tseed_prefix: str = 'SEED',\tsep: str = ' AS ') -> str:", "funcdef": "def"}, "sqlglot.dialects.hive": {"fullname": "sqlglot.dialects.hive", "modulename": "sqlglot.dialects.hive", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.hive.Hive": {"fullname": "sqlglot.dialects.hive.Hive", "modulename": "sqlglot.dialects.hive", "qualname": "Hive", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.hive.Hive.Tokenizer": {"fullname": "sqlglot.dialects.hive.Hive.Tokenizer", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.hive.Hive.Parser": {"fullname": "sqlglot.dialects.hive.Hive.Parser", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.hive.Hive.Generator": {"fullname": "sqlglot.dialects.hive.Hive.Generator", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to 'always'.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"fullname": "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator.arrayagg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ArrayAgg) -> str:", "funcdef": "def"}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"fullname": "sqlglot.dialects.hive.Hive.Generator.with_properties", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator.with_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"fullname": "sqlglot.dialects.hive.Hive.Generator.datatype_sql", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator.datatype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataType) -> str:", "funcdef": "def"}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"fullname": "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator.after_having_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> List[str]:", "funcdef": "def"}, "sqlglot.dialects.mysql": {"fullname": "sqlglot.dialects.mysql", "modulename": "sqlglot.dialects.mysql", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.mysql.MySQL": {"fullname": "sqlglot.dialects.mysql.MySQL", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"fullname": "sqlglot.dialects.mysql.MySQL.Tokenizer", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.mysql.MySQL.Parser": {"fullname": "sqlglot.dialects.mysql.MySQL.Parser", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.mysql.MySQL.Generator": {"fullname": "sqlglot.dialects.mysql.MySQL.Generator", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to 'always'.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"fullname": "sqlglot.dialects.mysql.MySQL.Generator.show_sql", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Generator.show_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Show) -> str:", "funcdef": "def"}, "sqlglot.dialects.oracle": {"fullname": "sqlglot.dialects.oracle", "modulename": "sqlglot.dialects.oracle", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.oracle.Oracle": {"fullname": "sqlglot.dialects.oracle.Oracle", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.oracle.Oracle.Parser": {"fullname": "sqlglot.dialects.oracle.Oracle.Parser", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.oracle.Oracle.Generator": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to 'always'.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator.offset_sql", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator.offset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Offset) -> str:", "funcdef": "def"}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator.column_sql", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator.column_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Column) -> str:", "funcdef": "def"}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator.xmltable_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.XMLTable) -> str:", "funcdef": "def"}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"fullname": "sqlglot.dialects.oracle.Oracle.Tokenizer", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.postgres": {"fullname": "sqlglot.dialects.postgres", "modulename": "sqlglot.dialects.postgres", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.postgres.Postgres": {"fullname": "sqlglot.dialects.postgres.Postgres", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"fullname": "sqlglot.dialects.postgres.Postgres.Tokenizer", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.postgres.Postgres.Parser": {"fullname": "sqlglot.dialects.postgres.Postgres.Parser", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.postgres.Postgres.Generator": {"fullname": "sqlglot.dialects.postgres.Postgres.Generator", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
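These Generator options can be supplied straight through sqlglot.transpile, which builds the dialect's Generator internally. A small illustrative sketch, assuming only the public transpile API and the pretty/identify knobs listed above:

import sqlglot

sql = "select a as x, b from t where a > 1"
# pretty=True adds indentation and newlines; identify=True quotes every identifier.
print(sqlglot.transpile(sql, read="postgres", write="postgres", pretty=True, identify=True)[0])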
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.presto": {"fullname": "sqlglot.dialects.presto", "modulename": "sqlglot.dialects.presto", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.presto.Presto": {"fullname": "sqlglot.dialects.presto.Presto", "modulename": "sqlglot.dialects.presto", "qualname": "Presto", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.presto.Presto.Tokenizer": {"fullname": "sqlglot.dialects.presto.Presto.Tokenizer", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.presto.Presto.Parser": {"fullname": "sqlglot.dialects.presto.Presto.Parser", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.presto.Presto.Generator": {"fullname": "sqlglot.dialects.presto.Presto.Generator", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"fullname": "sqlglot.dialects.presto.Presto.Generator.interval_sql", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator.interval_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Interval) -> str:", "funcdef": "def"}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"fullname": "sqlglot.dialects.presto.Presto.Generator.transaction_sql", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Transaction) -> str:", "funcdef": "def"}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"fullname": "sqlglot.dialects.presto.Presto.Generator.generateseries_sql", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator.generateseries_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.GenerateSeries) -> str:", "funcdef": "def"}, "sqlglot.dialects.redshift": {"fullname": "sqlglot.dialects.redshift", "modulename": "sqlglot.dialects.redshift", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.redshift.Redshift": {"fullname": "sqlglot.dialects.redshift.Redshift", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.postgres.Postgres"}, "sqlglot.dialects.redshift.Redshift.Parser": {"fullname": "sqlglot.dialects.redshift.Redshift.Parser", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.dialects.postgres.Postgres.Parser"}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"fullname": "sqlglot.dialects.redshift.Redshift.Tokenizer", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.postgres.Postgres.Tokenizer"}, "sqlglot.dialects.redshift.Redshift.Generator": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.postgres.Postgres.Generator"}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator.values_sql", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator.values_sql", "kind": "function", "doc": "

Converts a VALUES... expression into a series of unions.

\n\n

Note: If there are many unions, this results in a large number of recursive statements to\nevaluate the expression. You may need to increase sys.setrecursionlimit for it to run, and it can also be\nvery slow.
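An illustrative sketch of the rewrite described above (assumes the public sqlglot.transpile API; the exact output shape may vary between sqlglot versions):

import sqlglot

# VALUES rows are expected to be rendered as SELECT ... UNION ALL SELECT ... for Redshift.
print(
    sqlglot.transpile(
        "SELECT a, b FROM (VALUES (1, 'x'), (2, 'y')) AS t(a, b)",
        read="postgres",
        write="redshift",
    )[0]
)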

\n", "signature": "(self, expression: sqlglot.expressions.Values) -> str:", "funcdef": "def"}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator.with_properties", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator.with_properties", "kind": "function", "doc": "

Redshift doesn't have WITH as part of its with_properties, so we remove it.

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator.datatype_sql", "kind": "function", "doc": "

Redshift converts the TEXT data type to VARCHAR(255) by default, while what is usually meant is\nan unbounded VARCHAR, which in Redshift is VARCHAR(max). Therefore, if we get a TEXT data type\nwithout precision we convert it to VARCHAR(max), and if it does have precision we convert\nTEXT to VARCHAR.
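An illustrative sketch of this TEXT handling (assumes the public sqlglot.transpile API; exact output may differ by version):

import sqlglot

# TEXT without an explicit precision is expected to come out as VARCHAR(MAX) in Redshift.
print(sqlglot.transpile("CREATE TABLE t (c TEXT)", read="postgres", write="redshift")[0])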

\n", "signature": "(self, expression: sqlglot.expressions.DataType) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake": {"fullname": "sqlglot.dialects.snowflake", "modulename": "sqlglot.dialects.snowflake", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.snowflake.Snowflake": {"fullname": "sqlglot.dialects.snowflake.Snowflake", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Parser", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Tokenizer", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.except_op", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.except_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.intersect_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.settag_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SetTag) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.describe_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Describe) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.generatedasidentitycolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.GeneratedAsIdentityColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.dialects.spark": {"fullname": "sqlglot.dialects.spark", "modulename": "sqlglot.dialects.spark", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.spark.Spark": {"fullname": "sqlglot.dialects.spark.Spark", "modulename": "sqlglot.dialects.spark", "qualname": "Spark", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.spark2.Spark2"}, "sqlglot.dialects.spark.Spark.Parser": {"fullname": "sqlglot.dialects.spark.Spark.Parser", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.dialects.spark2.Spark2.Parser"}, "sqlglot.dialects.spark.Spark.Generator": {"fullname": "sqlglot.dialects.spark.Spark.Generator", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.spark2.Spark2.Generator"}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"fullname": "sqlglot.dialects.spark.Spark.Generator.datediff_sql", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.Generator.datediff_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DateDiff) -> str:", "funcdef": "def"}, "sqlglot.dialects.spark2": {"fullname": "sqlglot.dialects.spark2", "modulename": "sqlglot.dialects.spark2", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.spark2.Spark2": {"fullname": "sqlglot.dialects.spark2.Spark2", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.hive.Hive"}, "sqlglot.dialects.spark2.Spark2.Parser": {"fullname": "sqlglot.dialects.spark2.Spark2.Parser", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.dialects.hive.Hive.Parser"}, "sqlglot.dialects.spark2.Spark2.Generator": {"fullname": "sqlglot.dialects.spark2.Spark2.Generator", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.hive.Hive.Generator"}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"fullname": "sqlglot.dialects.spark2.Spark2.Generator.cast_sql", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2.Generator.cast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cast) -> str:", "funcdef": "def"}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"fullname": "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2.Generator.columndef_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ColumnDef, sep: str = ' ') -> str:", "funcdef": "def"}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"fullname": "sqlglot.dialects.spark2.Spark2.Tokenizer", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.hive.Hive.Tokenizer"}, "sqlglot.dialects.sqlite": {"fullname": "sqlglot.dialects.sqlite", "modulename": "sqlglot.dialects.sqlite", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.sqlite.SQLite": {"fullname": "sqlglot.dialects.sqlite.SQLite", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"fullname": "sqlglot.dialects.sqlite.SQLite.Tokenizer", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.sqlite.SQLite.Parser": {"fullname": "sqlglot.dialects.sqlite.SQLite.Parser", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.sqlite.SQLite.Generator": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.cast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cast) -> str:", "funcdef": "def"}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.datediff_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DateDiff) -> str:", "funcdef": "def"}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.groupconcat_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.GroupConcat) -> str:", "funcdef": "def"}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.least_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.least_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Least) -> str:", "funcdef": "def"}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Transaction) -> str:", "funcdef": "def"}, "sqlglot.dialects.starrocks": {"fullname": "sqlglot.dialects.starrocks", "modulename": "sqlglot.dialects.starrocks", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.starrocks.StarRocks": {"fullname": "sqlglot.dialects.starrocks.StarRocks", "modulename": "sqlglot.dialects.starrocks", "qualname": "StarRocks", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.mysql.MySQL"}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"fullname": "sqlglot.dialects.starrocks.StarRocks.Parser", "modulename": "sqlglot.dialects.starrocks", "qualname": "StarRocks.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.dialects.mysql.MySQL.Parser"}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"fullname": "sqlglot.dialects.starrocks.StarRocks.Generator", "modulename": "sqlglot.dialects.starrocks", "qualname": "StarRocks.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.mysql.MySQL.Generator"}, "sqlglot.dialects.tableau": {"fullname": "sqlglot.dialects.tableau", "modulename": "sqlglot.dialects.tableau", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.tableau.Tableau": {"fullname": "sqlglot.dialects.tableau.Tableau", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.tableau.Tableau.Generator": {"fullname": "sqlglot.dialects.tableau.Tableau.Generator", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"fullname": "sqlglot.dialects.tableau.Tableau.Generator.if_sql", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Generator.if_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.If) -> str:", "funcdef": "def"}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"fullname": "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Generator.coalesce_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Coalesce) -> str:", "funcdef": "def"}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"fullname": "sqlglot.dialects.tableau.Tableau.Generator.count_sql", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Generator.count_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Count) -> str:", "funcdef": "def"}, "sqlglot.dialects.tableau.Tableau.Parser": {"fullname": "sqlglot.dialects.tableau.Tableau.Parser", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.teradata": {"fullname": "sqlglot.dialects.teradata", "modulename": "sqlglot.dialects.teradata", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.teradata.Teradata": {"fullname": "sqlglot.dialects.teradata.Teradata", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"fullname": "sqlglot.dialects.teradata.Teradata.Tokenizer", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.teradata.Teradata.Parser": {"fullname": "sqlglot.dialects.teradata.Teradata.Parser", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.teradata.Teradata.Generator": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.partitionedbyproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.PartitionedByProperty) -> str:", "funcdef": "def"}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.update_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.update_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Update) -> str:", "funcdef": "def"}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.mod_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.mod_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Mod) -> str:", "funcdef": "def"}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.datatype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataType) -> str:", "funcdef": "def"}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.rangen_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RangeN) -> str:", "funcdef": "def"}, "sqlglot.dialects.trino": {"fullname": "sqlglot.dialects.trino", "modulename": "sqlglot.dialects.trino", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.trino.Trino": {"fullname": "sqlglot.dialects.trino.Trino", "modulename": "sqlglot.dialects.trino", "qualname": "Trino", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.presto.Presto"}, "sqlglot.dialects.trino.Trino.Generator": {"fullname": "sqlglot.dialects.trino.Trino.Generator", "modulename": "sqlglot.dialects.trino", "qualname": "Trino.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.presto.Presto.Generator"}, "sqlglot.dialects.trino.Trino.Tokenizer": {"fullname": "sqlglot.dialects.trino.Trino.Tokenizer", "modulename": "sqlglot.dialects.trino", "qualname": "Trino.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.presto.Presto.Tokenizer"}, "sqlglot.dialects.tsql": {"fullname": "sqlglot.dialects.tsql", "modulename": "sqlglot.dialects.tsql", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"fullname": "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "generate_date_delta_with_unit_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.DateAdd | sqlglot.expressions.DateDiff) -> str:", "funcdef": "def"}, "sqlglot.dialects.tsql.TSQL": {"fullname": "sqlglot.dialects.tsql.TSQL", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"fullname": "sqlglot.dialects.tsql.TSQL.Tokenizer", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.tsql.TSQL.Parser": {"fullname": "sqlglot.dialects.tsql.TSQL.Parser", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.tsql.TSQL.Generator": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator.offset_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator.offset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Offset) -> str:", "funcdef": "def"}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator.systemtime_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SystemTime) -> str:", "funcdef": "def"}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator.returnsproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ReturnsProperty) -> str:", "funcdef": "def"}, "sqlglot.diff": {"fullname": "sqlglot.diff", "modulename": "sqlglot.diff", "kind": "module", "doc": "

Semantic Diff for SQL

\n\n

by Iaroslav Zeigerman

\n\n

Motivation

\n\n

Software is constantly changing and evolving, and identifying what has changed and reviewing those changes is an integral part of the development process. SQL code is no exception to this.

\n\n

Text-based diff tools such as git diff, when applied to a code base, have certain limitations. First, they can only detect insertions and deletions, not movements or updates of individual pieces of code. Second, such tools can only detect changes between lines of text, which is too coarse for something as granular and detailed as source code. Additionally, the outcome of such a diff is dependent on the underlying code formatting, and yields different results if the formatting changes.

\n\n

Consider the following diff generated by Git:

\n\n

\"Git

\n\n

Semantically, the query hasn\u2019t changed. The two arguments b and c have been swapped (moved), which has no impact on the output of the query. Yet Git replaced the whole affected expression along with a bulk of unrelated elements.

\n\n

The alternative to text-based diffing is to compare Abstract Syntax Trees (ASTs) instead. The main advantage of ASTs is that they are a direct product of code parsing, which represents the underlying code structure at any desired level of granularity. Comparing ASTs may yield extremely precise diffs; changes such as code movements and updates can also be detected. Even more importantly, this approach facilitates additional use cases beyond eyeballing two versions of source code side by side.

\n\n

The use cases I had in mind for SQL when I decided to embark on this journey of semantic diffing were the following:

\n\n
    \n
  • Query similarity score. Identifying which parts the two queries have in common to automatically suggest opportunities for consolidation, creation of intermediate/staging tables, and so on.
  • \n
  • Differentiating between cosmetic / structural changes and functional ones. For example when a nested query is refactored into a common table expression (CTE), this kind of change doesn\u2019t have any functional impact on either a query or its outcome.
  • \n
  • Automatic suggestions about the need to retroactively backfill data. This is especially important for pipelines that populate very large tables for which restatement is a runtime-intensive procedure. The ability to discern between simple code movements and actual modifications can help assess the impact of a change and make suggestions accordingly.
  • \n
\n\n

The implementation discussed in this post is now a part of the SQLGlot library. You can find the complete source code in the diff.py module. The choice of SQLGlot was an obvious one due to its simple but powerful API, lack of external dependencies and, more importantly, its extensive list of supported SQL dialects.

\n\n

The Search for a Solution

\n\n

When it comes to any diffing tool (not just a semantic one), the primary challenge is to match as many elements of compared entities as possible. Once such a set of matching elements is available, deriving a sequence of changes becomes an easy task.

\n\n

If our elements have unique identifiers associated with them (for example, an element\u2019s ID in the DOM), the matching problem is trivial. However, the SQL syntax trees that we are comparing have neither unique keys nor object identifiers that can be used for the purposes of matching. So how are we supposed to find pairs of nodes that are related?

\n\n

To better illustrate the problem, consider comparing the following SQL expressions: SELECT a + b + c, d, e and SELECT a - b + c, e, f. Matching individual nodes from respective syntax trees can be visualized as follows:

\n\n

\"Figure\nFigure 1: Example of node matching for two SQL expression trees.

\n\n

By looking at the figure of node matching for two SQL expression trees above, we conclude that the following changes should be captured by our solution:

\n\n
    \n
  • Inserted nodes: Sub and f. These are the nodes from the target AST which do not have a matching node in the source AST.
  • \n
  • Removed nodes: Add and d. These are the nodes from the source AST which do not have a counterpart in the target AST.
  • \n
  • Remaining nodes must be identified as unchanged.
  • \n
\n\n

It should be clear at this point that if we manage to match nodes in the source tree with their counterparts in the target tree, then computing the diff becomes a trivial matter.

\n\n

Na\u00efve Brute-Force

\n\n

The na\u00efve solution would be to try all different permutations of node pair combinations, and see which set of pairs performs the best based on some type of heuristics. The runtime cost of such a solution quickly reaches the escape velocity; if both trees had only 10 nodes each, the number of such sets would approximately be 10! ^ 2 = 3.6M ^ 2 ~= 13 * 10^12. This is a very bad case of factorial complexity (to be precise, it\u2019s actually much worse - O(n! ^ 2) - but I couldn\u2019t come up with a name for it), so there is little need to explore this approach any further.

\n\n
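
That estimate is easy to sanity-check directly (a throwaway snippet, nothing SQLGlot-specific):

\n\n
\n
>>> import math\n>>> math.factorial(10) ** 2\n13168189440000\n
\n
\n\n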

Myers Algorithm

\n\n

After the na\u00efve approach was proven to be infeasible, the next question I asked myself was \u201chow does git diff work?\u201d. This question led me to discover the Myers diff algorithm [1]. This algorithm has been designed to compare sequences of strings. At its core, it\u2019s looking for the shortest path on a graph of possible edits that transform the first sequence into the second one, while heavily rewarding those paths that lead to longest subsequences of unchanged elements. There\u2019s a lot of material out there describing this algorithm in greater detail. I found James Coglan\u2019s series of blog posts to be the most comprehensive.

\n\n

Therefore, I had this \u201cbrilliant\u201d (actually not) idea to transform trees into sequences by traversing them in topological order, and then applying the Myers algorithm on the resulting sequences while using a custom heuristic when checking the equality of two nodes. Unsurprisingly, comparing sequences of strings is quite different from comparing hierarchical tree structures, and by flattening trees into sequences, we lose a lot of relevant context. This resulted in terrible performance of the algorithm on ASTs. It often matched completely unrelated nodes, even when the two trees were mostly the same, and produced extremely inaccurate lists of changes overall. After playing around with it a little and tweaking my equality heuristics to improve accuracy, I ultimately scrapped the whole implementation and went back to the drawing board.

\n\n

Change Distiller

\n\n

The algorithm I settled on in the end was Change Distiller, created by Fluri et al. [2], which in turn is an improvement over the core idea described by Chawathe et al. [3].

\n\n

The algorithm consists of two high-level steps:

\n\n
    \n
  1. Finding appropriate matchings between pairs of nodes that are part of compared ASTs. Identifying what is meant by \u201cappropriate\u201d matching is also a part of this step.
  2. \n
  3. Generating the so-called \u201cedit script\u201d from the matching set built in the 1st step. The edit script is a sequence of edit operations (for example, insert, remove, update, etc.) on individual tree nodes, such that when applied as transformations on the source AST, it eventually becomes the target AST. In general, the shorter the sequence, the better. The length of the edit script can be used to compare the performance of different algorithms, though this is not the only metric that matters.
  4. \n
\n\n

The rest of this section is dedicated to the Python implementation of the steps above using the AST implementation provided by the SQLGlot library.

\n\n

Building the Matching Set

\n\n

Matching Leaves

\n\n

We begin composing the matching set by matching the leaf nodes. Leaf nodes are the nodes that do not have any child nodes (such as literals, identifiers, etc.). In order to match them, we gather all the leaf nodes from the source tree and generate a Cartesian product with all the leaves from the target tree, comparing the pairs created this way and assigning them a similarity score. During this stage, we also exclude pairs that don\u2019t pass basic matching criteria. Then, we pick the pairs that scored the highest while making sure that each node is matched no more than once.

\n\n

Using the example provided at the beginning of the post, the process of building an initial set of candidate matchings can be seen on Figure 2.

\n\n

\"Figure\nFigure 2: Building a set of candidate matchings between leaf nodes. The third item in each triplet represents a similarity score between two nodes.

\n\n

First, let\u2019s analyze the similarity score. Then, we\u2019ll discuss matching criteria.

\n\n

The similarity score proposed by Fluri et al. [2] is a dice coefficient applied to bigrams of respective node values. A bigram is a sequence of two adjacent elements from a string computed in a sliding window fashion:

\n\n
\n
def bigram(string):\n    count = max(0, len(string) - 1)\n    return [string[i : i + 2] for i in range(count)]\n
\n
\n\n

For reasons that will become clear shortly, we actually need to compute bigram histograms rather than just sequences:

\n\n
\n
from collections import defaultdict\n\ndef bigram_histo(string):\n    count = max(0, len(string) - 1)\n    bigram_histo = defaultdict(int)\n    for i in range(count):\n        bigram_histo[string[i : i + 2]] += 1\n    return bigram_histo\n
\n
\n\n

The dice coefficient formula looks like the following:

\n\n

\"Dice

\n\n

Here X is the multiset of bigrams of the source node and Y is the multiset of bigrams of the target node. What this essentially does is count the number of bigram elements the two nodes have in common, multiply it by 2, and then divide by the total number of elements in both bigram multisets. This is where bigram histograms come in handy:

\n\n
\n
def dice_coefficient(source, target):\n    source_histo = bigram_histo(source.sql())\n    target_histo = bigram_histo(target.sql())\n\n    total_grams = (\n        sum(source_histo.values()) + sum(target_histo.values())\n    )\n    if not total_grams:\n        return 1.0 if source == target else 0.0\n\n    overlap_len = 0\n    overlapping_grams = set(source_histo) & set(target_histo)\n    for g in overlapping_grams:\n        overlap_len += min(source_histo[g], target_histo[g])\n\n    return 2 * overlap_len / total_grams\n
\n
\n\n

To compute bigrams for a tree node, we first transform the node into its canonical SQL representation, so that the Literal(123) node becomes just \u201c123\u201d and the Identifier(\u201ca\u201d) node becomes just \u201ca\u201d. We also handle the scenario in which strings are too short to derive bigrams; in this case, we fall back to checking the two nodes for equality.

\n\n

Now that we know how to compute the similarity score, we can take care of the matching criteria for leaf nodes. In the original paper [2], the matching criteria are formalized as follows:

\n\n

\"Matching

\n\n

The two nodes are matched if two conditions are met:

\n\n
    \n
  1. The node labels match (in our case labels are just node types).
  2. \n
  3. The similarity score for node values is greater than or equal to some threshold \u201cf\u201d. The authors of the paper recommend setting the value of \u201cf\u201d to 0.6.
  4. \n
\n\n

With building blocks in place, we can now build a matching set for leaf nodes. First, we generate a list of candidates for matching:

\n\n
\n
from heapq import heappush, heappop\n\ncandidate_matchings = []\nsource_leaves = _get_leaves(self._source)\ntarget_leaves = _get_leaves(self._target)\nfor source_leaf in source_leaves:\n    for target_leaf in target_leaves:\n        if _is_same_type(source_leaf, target_leaf):\n            similarity_score = dice_coefficient(\n                source_leaf, target_leaf\n            )\n            if similarity_score >= 0.6:\n                heappush(\n                    candidate_matchings,\n                    (\n                        -similarity_score,\n                        len(candidate_matchings),\n                        source_leaf,\n                        target_leaf,\n                    ),\n                )\n
\n
\n\n

In the implementation above, we push each matching pair onto the heap to automatically maintain the correct order based on the assigned similarity score.

\n\n

Finally, we build the initial matching set by picking leaf pairs with the highest score:

\n\n
\n
matching_set = set()\nwhile candidate_matchings:\n    _, _, source_leaf, target_leaf = heappop(candidate_matchings)\n    if (\n        source_leaf in unmatched_source_nodes\n        and target_leaf in unmatched_target_nodes\n    ):\n        matching_set.add((source_leaf, target_leaf))\n        unmatched_source_nodes.remove(source_leaf)\n        unmatched_target_nodes.remove(target_leaf)\n
\n
\n\n

To finalize the matching set, we should now proceed with matching inner nodes.

\n\n

Matching Inner Nodes

\n\n

Matching inner nodes is quite similar to matching leaf nodes, with the following two distinctions:

\n\n
    \n
  • Rather than ranking a set of possible candidates, we pick the first node pair that passes the matching criteria.
  • \n
  • The matching criteria itself has been extended to account for the number of leaf nodes the pair of inner nodes have in common.
  • \n
\n\n

\"Figure\nFigure 3: Matching inner nodes based on their type as well as how many of their leaf nodes have been previously matched.

\n\n

Let\u2019s start with the matching criteria. The criteria is formalized as follows:

\n\n

\"Matching

\n\n

Alongside the already familiar similarity score and node type criteria, there is a new one in the middle: the ratio of leaf nodes that the two nodes have in common must exceed some threshold \u201ct\u201d. The recommended value for \u201ct\u201d is also 0.6. Counting the number of common leaf nodes is pretty straightforward, since we already have the complete matching set for leaves. All we need to do is count how many matching pairs the leaf nodes of the two compared inner nodes form.

\n\n

There are two additional heuristics associated with these matching criteria:

\n\n
    \n
  • Inner node similarity weighting: if the similarity score between the node values doesn\u2019t pass the threshold \u201cf\u201d but the ratio of common leaf nodes (\u201ct\u201d) is greater than or equal to 0.8, then the matching is considered successful.
  • \n
  • The threshold \u201ct\u201d is reduced to 0.4 for inner nodes with the number of leaf nodes equal to 4 or less, in order to decrease the false negative rate for small subtrees.
  • \n
\n\n

We now only have to iterate through the remaining unmatched nodes and form matching pairs based on the outlined criteria:

\n\n
\n
leaves_matching_set = matching_set.copy()\n\nfor source_node in unmatched_source_nodes.copy():\n    for target_node in unmatched_target_nodes:\n        if _is_same_type(source_node, target_node):\n            source_leaves = set(_get_leaves(source_node))\n            target_leaves = set(_get_leaves(target_node))\n\n            max_leaves_num = max(len(source_leaves), len(target_leaves))\n            if max_leaves_num:\n                common_leaves_num = sum(\n                    1 if s in source_leaves and t in target_leaves else 0\n                    for s, t in leaves_matching_set\n                )\n                leaf_similarity_score = common_leaves_num / max_leaves_num\n            else:\n                leaf_similarity_score = 0.0\n\n            adjusted_t = (\n                0.6\n                if min(len(source_leaves), len(target_leaves)) > 4\n                else 0.4\n            )\n\n            if leaf_similarity_score >= 0.8 or (\n                leaf_similarity_score >= adjusted_t\n                and dice_coefficient(source_node, target_node) >= 0.6\n            ):\n                matching_set.add((source_node, target_node))\n                unmatched_source_nodes.remove(source_node)\n                unmatched_target_nodes.remove(target_node)\n                break\n
\n
\n\n

After the matching set is formed, we can proceed with generation of the edit script, which will be the algorithm\u2019s output.

\n\n

Generating the Edit Script

\n\n

At this point, we should have the following 3 sets at our disposal:

\n\n
    \n
  • The set of matched node pairs.
  • \n
  • The set of remaining unmatched nodes from the source tree.
  • \n
  • The set of remaining unmatched nodes from the target tree.
  • \n
\n\n

We can derive 3 kinds of edits from the matching set: either the node\u2019s value was updated (Update), the node was moved to a different position within the tree (Move), or the node remained unchanged (Keep). Note that the Move case is not mutually exclusive with the other two. The node could have been updated or could have remained the same while at the same time its position within its parent node or the parent node itself could have changed. All unmatched nodes from the source tree are the ones that were removed (Remove), while unmatched nodes from the target tree are the ones that were inserted (Insert).

\n\n

The latter two cases are pretty straightforward to implement:

\n\n
\n
edit_script = []\n\nfor removed_node in unmatched_source_nodes:\n    edit_script.append(Remove(removed_node))\nfor inserted_node in unmatched_target_nodes:\n    edit_script.append(Insert(inserted_node))\n
\n
\n\n

Traversing the matching set requires a little more thought:

\n\n
\n
for source_node, target_node in matching_set:\n    if (\n        not isinstance(source_node, LEAF_EXPRESSION_TYPES)\n        or source_node == target_node\n    ):\n        move_edits = generate_move_edits(\n            source_node, target_node, matching_set\n        )\n        edit_script.extend(move_edits)\n        edit_script.append(Keep(source_node, target_node))\n    else:\n        edit_script.append(Update(source_node, target_node))\n
\n
\n\n

If a matching pair represents a pair of leaf nodes, we check if they are the same to decide whether an update took place. For inner node pairs, we also need to compare the positions of their respective children to detect node movements. Chawathe et al. [3] suggest applying the longest common subsequence (LCS) algorithm which, no surprise here, was described by Myers himself [1]. There is a small catch, however: instead of checking the equality of two child nodes, we need to check whether the two nodes form a pair that is a part of our matching set.

\n\n

Now with this knowledge, the implementation becomes straightforward:

\n\n
\n
def generate_move_edits(source, target, matching_set):\n    source_children = _get_child_nodes(source)\n    target_children = _get_child_nodes(target)\n\n    lcs = set(\n        _longest_common_subsequence(\n            source_children,\n            target_children,\n            lambda l, r: (l, r) in matching_set\n        )\n    )\n\n    move_edits = []\n    for node in source_children:\n        if node not in lcs and node not in unmatched_source_nodes:\n            move_edits.append(Move(node))\n\n    return move_edits\n
\n
\n\n

I left out the implementation of the LCS algorithm itself here, but there are plenty of reference implementations out there that can easily be looked up; a minimal sketch is included below.

\n\n
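
This is not the exact implementation from diff.py, just the textbook dynamic-programming LCS parameterized by the custom equality callback used above; it returns the matched source-side children so that the node not in lcs check works:

\n\n
\n
def _longest_common_subsequence(source, target, equal):\n    # Classic dynamic-programming LCS, parameterized by a custom equality callback.\n    # lengths[i][j] holds the LCS length of source[:i] and target[:j].\n    lengths = [[0] * (len(target) + 1) for _ in range(len(source) + 1)]\n    for i, source_node in enumerate(source):\n        for j, target_node in enumerate(target):\n            if equal(source_node, target_node):\n                lengths[i + 1][j + 1] = lengths[i][j] + 1\n            else:\n                lengths[i + 1][j + 1] = max(lengths[i + 1][j], lengths[i][j + 1])\n\n    # Backtrack to recover one LCS, collecting the matched source-side nodes.\n    result = []\n    i, j = len(source), len(target)\n    while i > 0 and j > 0:\n        if equal(source[i - 1], target[j - 1]):\n            result.append(source[i - 1])\n            i -= 1\n            j -= 1\n        elif lengths[i - 1][j] >= lengths[i][j - 1]:\n            i -= 1\n        else:\n            j -= 1\n    return result[::-1]\n
\n
\n\n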

Output

\n\n

The implemented algorithm produces output that resembles the following:

\n\n
\n
>>> from sqlglot import parse_one, diff\n>>> diff(parse_one("SELECT a + b + c, d, e"), parse_one("SELECT a - b + c, e, f"))\n\nRemove(Add)\nRemove(Column(d))\nRemove(Identifier(d))\nInsert(Sub)\nInsert(Column(f))\nInsert(Identifier(f))\nKeep(Select, Select)\nKeep(Add, Add)\nKeep(Column(a), Column(a))\nKeep(Identifier(a), Identifier(a))\nKeep(Column(b), Column(b))\nKeep(Identifier(b), Identifier(b))\nKeep(Column(c), Column(c))\nKeep(Identifier(c), Identifier(c))\nKeep(Column(e), Column(e))\nKeep(Identifier(e), Identifier(e))\n
\n
\n\n

Note that the output above is abbreviated. The string representation of actual AST nodes is significantly more verbose.

\n\n

The implementation works especially well when coupled with SQLGlot\u2019s query optimizer, which can be used to produce canonical representations of the compared queries:

\n\n
\n
>>> from sqlglot import diff, parse_one\n>>> from sqlglot.diff import Keep\n>>> from sqlglot.optimizer import optimize\n>>> schema={"t": {"a": "INT", "b": "INT", "c": "INT", "d": "INT"}}\n>>> source = """\n... SELECT 1 + 1 + a\n... FROM t\n... WHERE b = 1 OR (c = 2 AND d = 3)\n... """\n>>> target = """\n... SELECT 2 + a\n... FROM t\n... WHERE (b = 1 OR c = 2) AND (b = 1 OR d = 3)\n... """\n>>> optimized_source = optimize(parse_one(source), schema=schema)\n>>> optimized_target = optimize(parse_one(target), schema=schema)\n>>> edit_script = diff(optimized_source, optimized_target)\n>>> sum(0 if isinstance(e, Keep) else 1 for e in edit_script)\n0\n
\n
\n\n

Optimizations

\n\n

The worst case runtime complexity of this algorithm is not exactly stellar: O(n^2 * log n^2). This is because of the leaf matching process, which involves ranking a cartesian product between all leaf nodes of compared trees. Unsurprisingly, the algorithm takes a considerable time to finish for bigger queries.

\n\n

There are still a few basic things we can do in our implementation to help improve performance:

\n\n
    \n
  • Refer to individual node objects using their identifiers (Python\u2019s id()) instead of direct references in sets. This helps avoid costly recursive hash calculations and equality checks.
  • \n
  • Cache bigram histograms to avoid computing them more than once for the same node (see the sketch after this list).
  • \n
  • Compute the canonical SQL string representation for each tree once while caching string representations of all inner nodes. This prevents redundant tree traversals when bigrams are computed.
  • \n
\n\n
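
To illustrate the second point, a bigram-histogram cache can be as small as the following sketch (a hypothetical helper reusing the bigram_histo function from earlier, not the exact code in diff.py):

\n\n
\n
_bigram_histo_cache = {}\n\ndef cached_bigram_histo(node):\n    # Key by object identity to avoid recursively hashing and comparing whole subtrees.\n    key = id(node)\n    if key not in _bigram_histo_cache:\n        _bigram_histo_cache[key] = bigram_histo(node.sql())\n    return _bigram_histo_cache[key]\n
\n
\n\n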

At the time of writing only the first two optimizations have been implemented, so there is an opportunity to contribute for anyone who\u2019s interested.

\n\n

Alternative Solutions

\n\n

This section is dedicated to solutions that I\u2019ve investigated, but haven\u2019t tried.

\n\n

First, this section wouldn\u2019t be complete without Tristan Hume\u2019s blog post. Tristan\u2019s solution has a lot in common with the Myers algorithm plus heuristics that are much more clever than what I came up with. The implementation relies on a combination of dynamic programming and the A* search algorithm to explore the space of possible matchings and pick the best ones. It seemed to have worked well for Tristan\u2019s specific use case, but after my negative experience with the Myers algorithm, I decided to try something different.

\n\n

Another notable approach is the Gumtree algorithm by Falleri et al. [4]. I discovered this paper after I\u2019d already implemented the algorithm that is the main focus of this post. In sections 5.2 and 5.3 of their paper, the authors compare the two algorithms side by side and claim that Gumtree is significantly better in terms of both runtime performance and accuracy when evaluated on 12 792 pairs of Java source files. This doesn\u2019t surprise me, as the algorithm takes the height of subtrees into account. In my tests, I definitely saw scenarios in which this context would have helped. On top of that, the authors promise O(n^2) runtime complexity in the worst case which, given Change Distiller's O(n^2 * log n^2), looks particularly tempting. I hope to try this algorithm out at some point, and there is a good chance you'll see me writing about it in a future post.

\n\n

Conclusion

\n\n

The Change Distiller algorithm yielded quite satisfactory results in most of my tests. The scenarios in which it fell short mostly concerned identical (or very similar) subtrees located in different parts of the AST. In those cases, node mismatches were frequent and, as a result, edit scripts were somewhat suboptimal.

\n\n

Additionally, the runtime performance of the algorithm leaves a lot to be desired. On trees with 1000 leaf nodes each, the algorithm takes a little under 2 seconds to complete. My implementation still has room for improvement, but this should give you a rough idea of what to expect. It appears that the Gumtree algorithm [4] can help address both of these points. I hope to find bandwidth to work on it soon and then compare the two algorithms side-by-side to find out which one performs better on SQL specifically. In the meantime, Change Distiller definitely gets the job done, and I can now proceed with applying it to some of the use cases I mentioned at the beginning of this post.

\n\n

I\u2019m also curious to learn whether other folks in the industry faced a similar problem, and how they approached it. If you did something similar, I\u2019m interested to hear about your experience.

\n\n

References

\n\n

[1] Eugene W. Myers. An O(ND) Difference Algorithm and Its Variations. Algorithmica 1(2): 251-266 (1986)

\n\n

[2] B. Fluri, M. Wursch, M. Pinzger, and H. Gall. Change Distilling: Tree differencing for fine-grained source code change extraction. IEEE Trans. Software Eng., 33(11):725\u2013743, 2007.

\n\n

[3] S.S. Chawathe, A. Rajaraman, H. Garcia-Molina, and J. Widom. Change Detection in Hierarchically Structured Information. Proc. ACM Sigmod Int\u2019l Conf. Management of Data, pp. 493-504, June 1996

\n\n

[4] Jean-R\u00e9my Falleri, Flor\u00e9al Morandat, Xavier Blanc, Matias Martinez, Martin Monperrus. Fine-grained and Accurate Source Code Differencing. Proceedings of the International Conference on Automated Software Engineering, 2014, V\u00e4steras, Sweden. pp.313-324, 10.1145/2642937.2642982. hal-01054552

\n\n
\n"}, "sqlglot.diff.Insert": {"fullname": "sqlglot.diff.Insert", "modulename": "sqlglot.diff", "qualname": "Insert", "kind": "class", "doc": "

Indicates that a new node has been inserted

\n"}, "sqlglot.diff.Insert.__init__": {"fullname": "sqlglot.diff.Insert.__init__", "modulename": "sqlglot.diff", "qualname": "Insert.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.diff.Remove": {"fullname": "sqlglot.diff.Remove", "modulename": "sqlglot.diff", "qualname": "Remove", "kind": "class", "doc": "

Indicates that an existing node has been removed

\n"}, "sqlglot.diff.Remove.__init__": {"fullname": "sqlglot.diff.Remove.__init__", "modulename": "sqlglot.diff", "qualname": "Remove.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.diff.Move": {"fullname": "sqlglot.diff.Move", "modulename": "sqlglot.diff", "qualname": "Move", "kind": "class", "doc": "

Indicates that an existing node's position within the tree has changed

\n"}, "sqlglot.diff.Move.__init__": {"fullname": "sqlglot.diff.Move.__init__", "modulename": "sqlglot.diff", "qualname": "Move.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.diff.Update": {"fullname": "sqlglot.diff.Update", "modulename": "sqlglot.diff", "qualname": "Update", "kind": "class", "doc": "

Indicates that an existing node has been updated

\n"}, "sqlglot.diff.Update.__init__": {"fullname": "sqlglot.diff.Update.__init__", "modulename": "sqlglot.diff", "qualname": "Update.__init__", "kind": "function", "doc": "

\n", "signature": "(\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression)"}, "sqlglot.diff.Keep": {"fullname": "sqlglot.diff.Keep", "modulename": "sqlglot.diff", "qualname": "Keep", "kind": "class", "doc": "

Indicates that an existing node hasn't been changed

\n"}, "sqlglot.diff.Keep.__init__": {"fullname": "sqlglot.diff.Keep.__init__", "modulename": "sqlglot.diff", "qualname": "Keep.__init__", "kind": "function", "doc": "

\n", "signature": "(\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression)"}, "sqlglot.diff.diff": {"fullname": "sqlglot.diff.diff", "modulename": "sqlglot.diff", "qualname": "diff", "kind": "function", "doc": "

Returns the list of changes between the source and the target expressions.

\n\n
Examples:
\n\n
\n
\n
>>> diff(parse_one("a + b"), parse_one("a + c"))\n[\n    Remove(expression=(COLUMN this: (IDENTIFIER this: b, quoted: False))),\n    Insert(expression=(COLUMN this: (IDENTIFIER this: c, quoted: False))),\n    Keep(\n        source=(ADD this: ...),\n        target=(ADD this: ...)\n    ),\n    Keep(\n        source=(COLUMN this: (IDENTIFIER this: a, quoted: False)),\n        target=(COLUMN this: (IDENTIFIER this: a, quoted: False))\n    ),\n]\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • source: the source expression.
  • \n
  • target: the target expression against which the diff should be calculated.
  • \n
  • matchings: the list of pre-matched node pairs which is used to help the algorithm's\nheuristics produce better results for subtrees that are known by a caller to be matching.\nNote: expression references in this list must refer to the same node objects that are\nreferenced in source / target trees.
  • \n
\n\n
Returns:
\n\n
\n

the list of Insert, Remove, Move, Update and Keep objects for each node in the source and the\n target expression trees. This list represents a sequence of steps needed to transform the source\n expression tree into the target one.

\n
\n", "signature": "(\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression,\tmatchings: Optional[List[Tuple[sqlglot.expressions.Expression, sqlglot.expressions.Expression]]] = None,\t**kwargs: Any) -> List[Union[sqlglot.diff.Insert, sqlglot.diff.Remove, sqlglot.diff.Move, sqlglot.diff.Update, sqlglot.diff.Keep]]:", "funcdef": "def"}, "sqlglot.diff.ChangeDistiller": {"fullname": "sqlglot.diff.ChangeDistiller", "modulename": "sqlglot.diff", "qualname": "ChangeDistiller", "kind": "class", "doc": "

The implementation of the Change Distiller algorithm described by Beat Fluri and Martin Pinzger in\ntheir paper https://ieeexplore.ieee.org/document/4339230, which in turn is based on the algorithm by\nChawathe et al. described in http://ilpubs.stanford.edu:8090/115/1/1995-46.pdf.

\n"}, "sqlglot.diff.ChangeDistiller.__init__": {"fullname": "sqlglot.diff.ChangeDistiller.__init__", "modulename": "sqlglot.diff", "qualname": "ChangeDistiller.__init__", "kind": "function", "doc": "

\n", "signature": "(f: float = 0.6, t: float = 0.6)"}, "sqlglot.diff.ChangeDistiller.diff": {"fullname": "sqlglot.diff.ChangeDistiller.diff", "modulename": "sqlglot.diff", "qualname": "ChangeDistiller.diff", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression,\tmatchings: Optional[List[Tuple[sqlglot.expressions.Expression, sqlglot.expressions.Expression]]] = None) -> List[Union[sqlglot.diff.Insert, sqlglot.diff.Remove, sqlglot.diff.Move, sqlglot.diff.Update, sqlglot.diff.Keep]]:", "funcdef": "def"}, "sqlglot.errors": {"fullname": "sqlglot.errors", "modulename": "sqlglot.errors", "kind": "module", "doc": "

\n"}, "sqlglot.errors.ErrorLevel": {"fullname": "sqlglot.errors.ErrorLevel", "modulename": "sqlglot.errors", "qualname": "ErrorLevel", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.errors.ErrorLevel.IGNORE": {"fullname": "sqlglot.errors.ErrorLevel.IGNORE", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.IGNORE", "kind": "variable", "doc": "

Ignore all errors.

\n", "default_value": "<ErrorLevel.IGNORE: 'IGNORE'>"}, "sqlglot.errors.ErrorLevel.WARN": {"fullname": "sqlglot.errors.ErrorLevel.WARN", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.WARN", "kind": "variable", "doc": "

Log all errors.

\n", "default_value": "<ErrorLevel.WARN: 'WARN'>"}, "sqlglot.errors.ErrorLevel.RAISE": {"fullname": "sqlglot.errors.ErrorLevel.RAISE", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.RAISE", "kind": "variable", "doc": "

Collect all errors and raise a single exception.

\n", "default_value": "<ErrorLevel.RAISE: 'RAISE'>"}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"fullname": "sqlglot.errors.ErrorLevel.IMMEDIATE", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.IMMEDIATE", "kind": "variable", "doc": "

Immediately raise an exception on the first error found.

\n", "default_value": "<ErrorLevel.IMMEDIATE: 'IMMEDIATE'>"}, "sqlglot.errors.SqlglotError": {"fullname": "sqlglot.errors.SqlglotError", "modulename": "sqlglot.errors", "qualname": "SqlglotError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "builtins.Exception"}, "sqlglot.errors.UnsupportedError": {"fullname": "sqlglot.errors.UnsupportedError", "modulename": "sqlglot.errors", "qualname": "UnsupportedError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.ParseError": {"fullname": "sqlglot.errors.ParseError", "modulename": "sqlglot.errors", "qualname": "ParseError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.ParseError.__init__": {"fullname": "sqlglot.errors.ParseError.__init__", "modulename": "sqlglot.errors", "qualname": "ParseError.__init__", "kind": "function", "doc": "

\n", "signature": "(message: str, errors: Optional[List[Dict[str, Any]]] = None)"}, "sqlglot.errors.ParseError.new": {"fullname": "sqlglot.errors.ParseError.new", "modulename": "sqlglot.errors", "qualname": "ParseError.new", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tmessage: str,\tdescription: Optional[str] = None,\tline: Optional[int] = None,\tcol: Optional[int] = None,\tstart_context: Optional[str] = None,\thighlight: Optional[str] = None,\tend_context: Optional[str] = None,\tinto_expression: Optional[str] = None) -> sqlglot.errors.ParseError:", "funcdef": "def"}, "sqlglot.errors.TokenError": {"fullname": "sqlglot.errors.TokenError", "modulename": "sqlglot.errors", "qualname": "TokenError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.OptimizeError": {"fullname": "sqlglot.errors.OptimizeError", "modulename": "sqlglot.errors", "qualname": "OptimizeError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.SchemaError": {"fullname": "sqlglot.errors.SchemaError", "modulename": "sqlglot.errors", "qualname": "SchemaError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.ExecuteError": {"fullname": "sqlglot.errors.ExecuteError", "modulename": "sqlglot.errors", "qualname": "ExecuteError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.concat_messages": {"fullname": "sqlglot.errors.concat_messages", "modulename": "sqlglot.errors", "qualname": "concat_messages", "kind": "function", "doc": "

\n", "signature": "(errors: Sequence[Any], maximum: int) -> str:", "funcdef": "def"}, "sqlglot.errors.merge_errors": {"fullname": "sqlglot.errors.merge_errors", "modulename": "sqlglot.errors", "qualname": "merge_errors", "kind": "function", "doc": "

\n", "signature": "(errors: Sequence[sqlglot.errors.ParseError]) -> List[Dict[str, Any]]:", "funcdef": "def"}, "sqlglot.executor": {"fullname": "sqlglot.executor", "modulename": "sqlglot.executor", "kind": "module", "doc": "

Writing a Python SQL engine from scratch

\n\n

Toby Mao

\n\n

Introduction

\n\n

When I first started writing SQLGlot in early 2021, my goal was just to translate SQL queries from SparkSQL to Presto and vice versa. However, over the last year and a half, I've ended up with a full-fledged SQL engine. SQLGlot can now parse and transpile between 18 SQL dialects and can execute all 24 TPC-H SQL queries. The parser and engine are all written from scratch using Python.

\n\n
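
As a quick illustration of the transpilation side, a minimal example using the public sqlglot.transpile API (the dialects here are chosen arbitrarily):

\n\n
\n
import sqlglot\n\n# Translate a T-SQL style TOP into a LIMIT for a dialect that doesn't support TOP.\nprint(sqlglot.transpile("SELECT TOP 1 x FROM y", read="tsql", write="duckdb")[0])\n
\n
\n\n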

This post will cover why I went through the effort of creating a Python SQL engine and how a simple query goes from a string to actually transforming data. The following steps are briefly summarized:

\n\n\n\n

Why?

\n\n

I started working on SQLGlot because of my work on the experimentation and metrics platform at Netflix, where I built tools that allowed data scientists to define and compute SQL-based metrics. Netflix relied on multiple engines to query data (Spark, Presto, and Druid), so my team built the metrics platform around PyPika, a Python SQL query builder. This way, definitions could be reused across multiple engines. However, it quickly became apparent that writing Python code to programmatically generate SQL was challenging for data scientists, especially those with academic backgrounds, since they were mostly familiar with R and SQL. At the time, the only Python SQL parser was sqlparse, which is not actually a parser but a tokenizer, so having users write raw SQL into the platform wasn't really an option. Some time later, I randomly stumbled across Crafting Interpreters and realized that I could use it as a guide towards creating my own SQL parser/transpiler.

\n\n

Why did I do this? Isn't a Python SQL engine going to be extremely slow?

\n\n

The main reason why I ended up building a SQL engine was...just for entertainment. It's been fun learning about all the things required to actually run a SQL query, and seeing it actually work is extremely rewarding. Before SQLGlot, I had zero experience with lexers, parsers, or compilers.

\n\n

In terms of practical use cases, I planned to use the Python SQL engine for unit testing SQL pipelines. Big data pipelines are tough to test because many of the engines are not open source and cannot be run locally. With SQLGlot, you can take a SQL query targeting a warehouse such as Snowflake and seamlessly run it in CI on mock Python data. It's easy to mock data and create arbitrary UDFs because everything is just Python. Although the implementation is slow and unsuitable for large amounts of data (> 1 million rows), there's very little overhead/startup and you can run queries on test data in a couple of milliseconds.

\n\n

Finally, the components that have been built to support execution can be used as a foundation for a faster engine. I'm inspired by what Apache Calcite has done for the JVM world. Even though Python is commonly used for data, there hasn't been a Calcite for Python. So, you could say that SQLGlot aims to be that framework. For example, it wouldn't take much work to replace the Python execution engine with numpy/pandas/arrow to become a respectably-performing query engine. The implementation would be able to leverage the parser, optimizer, and logical planner, only needing to implement physical execution. There is a lot of work in the Python ecosystem around high-performance vectorized computation, which I think could benefit from a pure Python-based AST/plan. Parsing and planning don't have to be fast when the bottleneck of running queries is processing terabytes of data. So, having a Python-based ecosystem around SQL is beneficial given the ease of development in Python, despite not having bare metal performance.

\n\n

Parts of SQLGlot's toolkit are being used today by the following:

\n\n
    \n
  • Ibis: A Python library that provides a lightweight, universal interface for data wrangling.\n
      \n
    • Uses the Python SQL expression builder and leverages the optimizer/planner to convert SQL into dataframe operations.
    • \n
  • \n
  • mysql-mimic: Pure-Python implementation of the MySQL server wire protocol\n
      \n
    • Parses / transforms SQL and executes INFORMATION_SCHEMA queries.
    • \n
  • \n
  • Quokka: Push-based vectorized query engine\n
      \n
    • Parses and optimizes SQL.
    • \n
  • \n
  • Splink: Fast, accurate and scalable probabilistic data linkage using your choice of SQL backend.\n
      \n
    • Transpiles queries.
    • \n
  • \n
\n\n

How?

\n\n

There are many steps involved with actually running a simple query like:

\n\n
\n
SELECT\n  bar.a,\n  b + 1 AS b\nFROM bar\nJOIN baz\n  ON bar.a = baz.a\nWHERE bar.a > 1\n
\n
\n\n

In this post, I'll walk through all the steps SQLGlot takes to run this query over Python objects.

\n\n

Tokenizing

\n\n

The first step is to convert the SQL string into a list of tokens. SQLGlot's tokenizer is quite simple and can be found here. In a while loop, it checks each character and either appends the character to the current token, or makes a new token.

\n\n

Running the SQLGlot tokenizer on the query above produces the following output.

\n\n

\"Tokenizer

\n\n

Each keyword has been converted to a SQLGlot Token object. Each token has some metadata associated with it, like line/column information for error messages. Comments are also attached to tokens, so that they can be preserved.

\n\n
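To try this yourself without the screenshot, here is a minimal sketch that runs the tokenizer directly on the example query (the exact token attributes may differ slightly between versions):

from sqlglot.tokens import Tokenizer

sql = """
SELECT
  bar.a,
  b + 1 AS b
FROM bar
JOIN baz
  ON bar.a = baz.a
WHERE bar.a > 1
"""

# Tokenizer().tokenize returns a list of Token objects carrying the
# metadata described above (token type, text, line/column, comments).
for token in Tokenizer().tokenize(sql):
    print(token.token_type, repr(token.text))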

Parsing

\n\n

Once a SQL statement is tokenized, we don't need to worry about white space and other formatting, so it's easier to work with. We can now convert the list of tokens into an AST. The SQLGlot parser is a handwritten recursive descent parser.

\n\n

Similar to the tokenizer, it consumes the tokens sequentially, but it instead uses a recursive algorithm. The tokens are converted into a single AST node that represents the SQL query. The SQLGlot parser was designed to support various dialects, so it contains many options for overriding parsing functionality.

\n\n

\"Parser

\n\n
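As a quick sketch, the same query can be parsed into an AST with parse_one (the exact repr formatting varies by version):

from sqlglot import parse_one

ast = parse_one(
    "SELECT bar.a, b + 1 AS b FROM bar JOIN baz ON bar.a = baz.a WHERE bar.a > 1"
)

# The whole query becomes a single Select node; its clauses
# (projections, joins, where) are nested child expressions.
print(type(ast))   # <class 'sqlglot.expressions.Select'>
print(repr(ast))   # a nested tree of Expression objects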

The AST is a generic representation of a given SQL query. Each dialect can override or implement its own generator, which can convert an AST object into syntactically correct SQL.

\n\n
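For example, here is a small sketch of parsing with one dialect and generating for another; it loosely mirrors the transpilation example from the SQLGlot README, and the exact output may vary by version:

from sqlglot import parse_one

# Parse with one dialect and generate SQL for another.
ast = parse_one("SELECT EPOCH_MS(1618088028295)", read="duckdb")
print(ast.sql(dialect="hive"))  # e.g. SELECT FROM_UNIXTIME(1618088028295 / 1000)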

Optimizing

\n\n

Once we have our AST, we can transform it into an equivalent query that produces the same results more efficiently. When optimizing queries, most engines first convert the AST into a logical plan and then optimize the plan. However, I chose to optimize the AST directly for the following reasons:

\n\n
    \n
  1. It's easier to debug and validate the optimizations when the input and output are both SQL.

  2. Rules can be applied a la carte to transform SQL into a more desirable form.

  3. I wanted a way to generate 'canonical sql'. Having a canonical representation of SQL is useful for understanding if two queries are semantically equivalent (e.g. SELECT 1 + 1 and SELECT 2).
\n\n

I've yet to find another engine that takes this approach, but I'm quite happy with this decision. The optimizer currently does not perform any \"physical optimizations\" such as join reordering. Those are left to the execution layer, as additional statistics and information could become relevant.

\n\n

\"Optimizer

\n\n

The optimizer currently has 17 rules. Each of these rules is applied, transforming the AST in place. The combination of these rules creates \"canonical\" sql that can then be more easily converted into a logical plan and executed.

\n\n
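As a sketch, the full rule set can be applied in one call through the optimize entry point; the schema below is made up purely for illustration:

from sqlglot import parse_one
from sqlglot.optimizer import optimize

optimized = optimize(
    parse_one("SELECT * FROM bar JOIN baz ON bar.a = baz.a WHERE bar.a > 1"),
    schema={"bar": {"a": "INT", "b": "INT"}, "baz": {"a": "INT"}},
)

# The result is an equivalent, fully qualified and simplified query.
print(optimized.sql(pretty=True))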

Some example rules are:

\n\n

qualify_tables and qualify_columns

\n\n
    \n
  • Adds all db/catalog qualifiers to tables and forces an alias.
  • \n
  • Ensures each column is unambiguous and expands stars.
  • \n
\n\n
\n
SELECT * FROM x;\n\nSELECT "x"."a" AS "a" FROM "db"."x" AS "x";\n
\n
\n\n
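Here is a sketch of applying just these two rules a la carte, assuming a toy schema in which table x has a single column a; the exact output formatting may differ:

from sqlglot import parse_one
from sqlglot.optimizer.qualify_tables import qualify_tables
from sqlglot.optimizer.qualify_columns import qualify_columns

expression = parse_one("SELECT * FROM x")
expression = qualify_tables(expression, db="db")
expression = qualify_columns(expression, schema={"x": {"a": "INT"}})

# Roughly: SELECT x.a AS a FROM db.x AS x
print(expression.sql())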

simplify

\n\n

Boolean and math simplification. Check out all the test cases.

\n\n
\n
((NOT FALSE) AND (x = x)) AND (TRUE OR 1 <> 3);\nx = x;\n\n1 + 1;\n2;\n
\n
\n\n
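A minimal sketch of calling the rule directly, matching the examples above:

from sqlglot import parse_one
from sqlglot.optimizer.simplify import simplify

print(simplify(parse_one("((NOT FALSE) AND (x = x)) AND (TRUE OR 1 <> 3)")).sql())  # x = x
print(simplify(parse_one("1 + 1")).sql())  # 2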

normalize

\n\n

Attempts to convert all predicates into conjunctive normal form.

\n\n
\n
-- DNF\n(A AND B) OR (B AND C AND D);\n\n-- CNF\n(A OR C) AND (A OR D) AND B;\n
\n
\n\n
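A minimal sketch of calling this rule directly:

from sqlglot import parse_one
from sqlglot.optimizer.normalize import normalize

# DNF in, CNF out (matching the example above).
print(normalize(parse_one("(A AND B) OR (B AND C AND D)")).sql())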

unnest_subqueries

\n\n

Converts subqueries in predicates into joins.

\n\n
\n
-- The subquery can be converted into a left join\nSELECT *\nFROM x AS x\nWHERE (\n  SELECT y.a AS a\n  FROM y AS y\n  WHERE x.a = y.a\n) = 1;\n\nSELECT *\nFROM x AS x\nLEFT JOIN (\n  SELECT y.a AS a\n  FROM y AS y\n  WHERE TRUE\n  GROUP BY y.a\n) AS "_u_0"\n  ON x.a = "_u_0".a\nWHERE ("_u_0".a = 1 AND NOT "_u_0".a IS NULL)\n
\n
\n\n

pushdown_predicates

\n\n

Pushes down filters into the innermost query.

\n\n
\n
SELECT *\nFROM (\n  SELECT *\n  FROM x AS x\n) AS y\nWHERE y.a = 1;\n\nSELECT *\nFROM (\n  SELECT *\n  FROM x AS x\n  WHERE y.a = 1\n) AS y WHERE TRUE\n
\n
\n\n

annotate_types

\n\n

Infers all types throughout the AST given schema information and function type definitions.

\n\n
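A small sketch of running this rule in isolation; the exact repr of the inferred type may vary by version:

from sqlglot import parse_one
from sqlglot.optimizer.annotate_types import annotate_types

annotated = annotate_types(parse_one("SELECT 1 + 2 AS x"))

# After annotation, each node carries an inferred DataType (an INT here).
print(annotated.expressions[0].type)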

Planning

\n\n

After the SQL AST has been \"optimized\", it's much easier to convert into a logical plan. The AST is traversed and converted into a DAG in which each node is one of five kinds of steps. The different steps are:

\n\n

Scan

\n\n

Selects columns from a table, applies projections, and finally filters the table.

\n\n

Sort

\n\n

Sorts a table for order by expressions.

\n\n

Set

\n\n

Applies the operators union/union all/except/intersect.

\n\n

Aggregate

\n\n

Applies an aggregation/group by.

\n\n

Join

\n\n

Joins multiple tables together.

\n\n

\"Planner

\n\n

The logical plan is quite simple and contains the information required to convert it into a physical plan (execution).

\n\n
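As a hedged sketch (the planner is an internal API and its surface may change), a plan can be built from an optimized expression like this:

from sqlglot import parse_one
from sqlglot.optimizer import optimize
from sqlglot.planner import Plan

optimized = optimize(
    parse_one("SELECT bar.a, b + 1 AS b FROM bar JOIN baz ON bar.a = baz.a WHERE bar.a > 1"),
    schema={"bar": {"a": "INT", "b": "INT"}, "baz": {"a": "INT"}},
)

plan = Plan(optimized)
print(plan.root)  # the final step in the DAG; its dependencies are the earlier steps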

Executing

\n\n

Finally, we can actually execute the SQL query. The Python engine is not fast, but it's very small (~400 LOC)! It iterates the DAG with a queue and runs each step, passing each intermediary table to the next step.

\n\n

In order to keep things simple, it evaluates expressions with eval. Because SQLGlot was built primarily to be a transpiler, it was simple to create a \"Python SQL\" dialect. So a SQL expression x + 1 can just be converted into scope['x'] + 1.

\n\n

\"Executor

\n\n
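Putting it all together, here is a small end-to-end sketch that runs the example query over in-memory Python data; the table contents are made up for illustration:

from sqlglot.executor import execute

tables = {
    "bar": [
        {"a": 1, "b": 10},
        {"a": 2, "b": 20},
        {"a": 3, "b": 30},
    ],
    "baz": [{"a": 2}, {"a": 3}],
}

result = execute(
    """
    SELECT
      bar.a,
      b + 1 AS b
    FROM bar
    JOIN baz
      ON bar.a = baz.a
    WHERE bar.a > 1
    """,
    tables=tables,
)

# `result` is the executor's simple columnar Table.
print(result.columns)
for row in result.rows:
    print(row)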

What's next

\n\n

SQLGlot's main focus will always be on parsing/transpiling, but I plan to continue development on the execution engine. I'd like to pass TPC-DS. If someone doesn't beat me to it, I may even take a stab at writing a Pandas/Arrow execution engine.

\n\n

I'm hoping that over time, SQLGlot will spark the Python SQL ecosystem just like Calcite has for Java.

\n\n

Special thanks

\n\n

SQLGlot would not be what it is without its core contributors. In particular, the execution engine would not exist without Barak Alon and George Sittas.

\n\n

Get in touch

\n\n

If you'd like to chat more about SQLGlot, please join my Slack Channel!

\n\n
\n"}, "sqlglot.executor.execute": {"fullname": "sqlglot.executor.execute", "modulename": "sqlglot.executor", "qualname": "execute", "kind": "function", "doc": "

Run a SQL query against data.

\n\n
Arguments:
\n\n
    \n
  • sql: a sql statement.
  • \n
  • schema: database schema.\nThis can either be an instance of Schema or a mapping in one of the following forms:\n
      \n
    1. {table: {col: type}}
    2. {db: {table: {col: type}}}
    3. {catalog: {db: {table: {col: type}}}}
  • \n
  • read: the SQL dialect to apply during parsing (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • tables: additional tables to register.
  • \n
\n\n
Returns:
\n\n
\n

Simple columnar data structure.

\n
\n", "signature": "(\tsql: str | sqlglot.expressions.Expression,\tschema: Union[Dict, sqlglot.schema.Schema, NoneType] = None,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\ttables: Optional[Dict] = None) -> sqlglot.executor.table.Table:", "funcdef": "def"}, "sqlglot.executor.context": {"fullname": "sqlglot.executor.context", "modulename": "sqlglot.executor.context", "kind": "module", "doc": "

\n"}, "sqlglot.executor.context.Context": {"fullname": "sqlglot.executor.context.Context", "modulename": "sqlglot.executor.context", "qualname": "Context", "kind": "class", "doc": "

Execution context for sql expressions.

\n\n

Context is used to hold relevant data tables which can then be queried on with eval.

\n\n

References to columns can either be scalar or vectors. When set_row is used, column references\nevaluate to scalars while set_range evaluates to vectors. This allows convenient and efficient\nevaluation of aggregation functions.

\n"}, "sqlglot.executor.context.Context.__init__": {"fullname": "sqlglot.executor.context.Context.__init__", "modulename": "sqlglot.executor.context", "qualname": "Context.__init__", "kind": "function", "doc": "

Args\n tables: representing the scope of the current execution context.\n env: dictionary of functions within the execution context.

\n", "signature": "(\ttables: Dict[str, sqlglot.executor.table.Table],\tenv: Optional[Dict] = None)"}, "sqlglot.executor.context.Context.eval": {"fullname": "sqlglot.executor.context.Context.eval", "modulename": "sqlglot.executor.context", "qualname": "Context.eval", "kind": "function", "doc": "

\n", "signature": "(self, code):", "funcdef": "def"}, "sqlglot.executor.context.Context.eval_tuple": {"fullname": "sqlglot.executor.context.Context.eval_tuple", "modulename": "sqlglot.executor.context", "qualname": "Context.eval_tuple", "kind": "function", "doc": "

\n", "signature": "(self, codes):", "funcdef": "def"}, "sqlglot.executor.context.Context.add_columns": {"fullname": "sqlglot.executor.context.Context.add_columns", "modulename": "sqlglot.executor.context", "qualname": "Context.add_columns", "kind": "function", "doc": "

\n", "signature": "(self, *columns: str) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.table_iter": {"fullname": "sqlglot.executor.context.Context.table_iter", "modulename": "sqlglot.executor.context", "qualname": "Context.table_iter", "kind": "function", "doc": "

\n", "signature": "(\tself,\ttable: str) -> Iterator[Tuple[sqlglot.executor.table.TableIter, sqlglot.executor.context.Context]]:", "funcdef": "def"}, "sqlglot.executor.context.Context.filter": {"fullname": "sqlglot.executor.context.Context.filter", "modulename": "sqlglot.executor.context", "qualname": "Context.filter", "kind": "function", "doc": "

\n", "signature": "(self, condition) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.sort": {"fullname": "sqlglot.executor.context.Context.sort", "modulename": "sqlglot.executor.context", "qualname": "Context.sort", "kind": "function", "doc": "

\n", "signature": "(self, key) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.set_row": {"fullname": "sqlglot.executor.context.Context.set_row", "modulename": "sqlglot.executor.context", "qualname": "Context.set_row", "kind": "function", "doc": "

\n", "signature": "(self, row: Tuple) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.set_index": {"fullname": "sqlglot.executor.context.Context.set_index", "modulename": "sqlglot.executor.context", "qualname": "Context.set_index", "kind": "function", "doc": "

\n", "signature": "(self, index: int) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.set_range": {"fullname": "sqlglot.executor.context.Context.set_range", "modulename": "sqlglot.executor.context", "qualname": "Context.set_range", "kind": "function", "doc": "

\n", "signature": "(self, start: int, end: int) -> None:", "funcdef": "def"}, "sqlglot.executor.env": {"fullname": "sqlglot.executor.env", "modulename": "sqlglot.executor.env", "kind": "module", "doc": "

\n"}, "sqlglot.executor.env.reverse_key": {"fullname": "sqlglot.executor.env.reverse_key", "modulename": "sqlglot.executor.env", "qualname": "reverse_key", "kind": "class", "doc": "

\n"}, "sqlglot.executor.env.reverse_key.__init__": {"fullname": "sqlglot.executor.env.reverse_key.__init__", "modulename": "sqlglot.executor.env", "qualname": "reverse_key.__init__", "kind": "function", "doc": "

\n", "signature": "(obj)"}, "sqlglot.executor.env.filter_nulls": {"fullname": "sqlglot.executor.env.filter_nulls", "modulename": "sqlglot.executor.env", "qualname": "filter_nulls", "kind": "function", "doc": "

\n", "signature": "(func, empty_null=True):", "funcdef": "def"}, "sqlglot.executor.env.null_if_any": {"fullname": "sqlglot.executor.env.null_if_any", "modulename": "sqlglot.executor.env", "qualname": "null_if_any", "kind": "function", "doc": "

Decorator that makes a function return None if any of the required arguments are None.

\n\n

This also supports decoration with no arguments, e.g.:

\n\n
@null_if_any\ndef foo(a, b): ...\n
\n\n

In which case all arguments are required.
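Based on the description above, a small usage sketch (the add function is made up for illustration):

from sqlglot.executor.env import null_if_any

# Only `a` is listed as required, so only a None `a` short-circuits the call.
@null_if_any("a")
def add(a, b):
    return a + (b or 0)

print(add(None, 5))  # None, because the required argument `a` is None
print(add(1, None))  # 1, since `b` is not listed as required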

\n", "signature": "(*required):", "funcdef": "def"}, "sqlglot.executor.env.str_position": {"fullname": "sqlglot.executor.env.str_position", "modulename": "sqlglot.executor.env", "qualname": "str_position", "kind": "function", "doc": "

\n", "signature": "(substr, this, position=None):", "funcdef": "def"}, "sqlglot.executor.env.substring": {"fullname": "sqlglot.executor.env.substring", "modulename": "sqlglot.executor.env", "qualname": "substring", "kind": "function", "doc": "

\n", "signature": "(this, start=None, length=None):", "funcdef": "def"}, "sqlglot.executor.env.cast": {"fullname": "sqlglot.executor.env.cast", "modulename": "sqlglot.executor.env", "qualname": "cast", "kind": "function", "doc": "

\n", "signature": "(this, to):", "funcdef": "def"}, "sqlglot.executor.env.ordered": {"fullname": "sqlglot.executor.env.ordered", "modulename": "sqlglot.executor.env", "qualname": "ordered", "kind": "function", "doc": "

\n", "signature": "(this, desc, nulls_first):", "funcdef": "def"}, "sqlglot.executor.env.interval": {"fullname": "sqlglot.executor.env.interval", "modulename": "sqlglot.executor.env", "qualname": "interval", "kind": "function", "doc": "

\n", "signature": "(this, unit):", "funcdef": "def"}, "sqlglot.executor.python": {"fullname": "sqlglot.executor.python", "modulename": "sqlglot.executor.python", "kind": "module", "doc": "

\n"}, "sqlglot.executor.python.PythonExecutor": {"fullname": "sqlglot.executor.python.PythonExecutor", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor", "kind": "class", "doc": "

\n"}, "sqlglot.executor.python.PythonExecutor.__init__": {"fullname": "sqlglot.executor.python.PythonExecutor.__init__", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.__init__", "kind": "function", "doc": "

\n", "signature": "(env=None, tables=None)"}, "sqlglot.executor.python.PythonExecutor.execute": {"fullname": "sqlglot.executor.python.PythonExecutor.execute", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.execute", "kind": "function", "doc": "

\n", "signature": "(self, plan):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.generate": {"fullname": "sqlglot.executor.python.PythonExecutor.generate", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.generate", "kind": "function", "doc": "

Convert a SQL expression into literal Python code and compile it into bytecode.

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"fullname": "sqlglot.executor.python.PythonExecutor.generate_tuple", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.generate_tuple", "kind": "function", "doc": "

Convert an array of SQL expressions into a tuple of Python bytecode.

\n", "signature": "(self, expressions):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.context": {"fullname": "sqlglot.executor.python.PythonExecutor.context", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.context", "kind": "function", "doc": "

\n", "signature": "(self, tables):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.table": {"fullname": "sqlglot.executor.python.PythonExecutor.table", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.table", "kind": "function", "doc": "

\n", "signature": "(self, expressions):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.scan": {"fullname": "sqlglot.executor.python.PythonExecutor.scan", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.scan", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.static": {"fullname": "sqlglot.executor.python.PythonExecutor.static", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.static", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.scan_table": {"fullname": "sqlglot.executor.python.PythonExecutor.scan_table", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.scan_table", "kind": "function", "doc": "

\n", "signature": "(self, step):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"fullname": "sqlglot.executor.python.PythonExecutor.scan_csv", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.scan_csv", "kind": "function", "doc": "

\n", "signature": "(self, step):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.join": {"fullname": "sqlglot.executor.python.PythonExecutor.join", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.join", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"fullname": "sqlglot.executor.python.PythonExecutor.nested_loop_join", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.nested_loop_join", "kind": "function", "doc": "

\n", "signature": "(self, _join, source_context, join_context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.hash_join": {"fullname": "sqlglot.executor.python.PythonExecutor.hash_join", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.hash_join", "kind": "function", "doc": "

\n", "signature": "(self, join, source_context, join_context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.aggregate": {"fullname": "sqlglot.executor.python.PythonExecutor.aggregate", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.aggregate", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.sort": {"fullname": "sqlglot.executor.python.PythonExecutor.sort", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.sort", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.set_operation": {"fullname": "sqlglot.executor.python.PythonExecutor.set_operation", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.set_operation", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.Python": {"fullname": "sqlglot.executor.python.Python", "modulename": "sqlglot.executor.python", "qualname": "Python", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.executor.python.Python.Tokenizer": {"fullname": "sqlglot.executor.python.Python.Tokenizer", "modulename": "sqlglot.executor.python", "qualname": "Python.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.executor.python.Python.Generator": {"fullname": "sqlglot.executor.python.Python.Generator", "modulename": "sqlglot.executor.python", "qualname": "Python.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True all identifiers will be lowercased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.executor.table": {"fullname": "sqlglot.executor.table", "modulename": "sqlglot.executor.table", "kind": "module", "doc": "

\n"}, "sqlglot.executor.table.Table": {"fullname": "sqlglot.executor.table.Table", "modulename": "sqlglot.executor.table", "qualname": "Table", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.Table.__init__": {"fullname": "sqlglot.executor.table.Table.__init__", "modulename": "sqlglot.executor.table", "qualname": "Table.__init__", "kind": "function", "doc": "

\n", "signature": "(columns, rows=None, column_range=None)"}, "sqlglot.executor.table.Table.add_columns": {"fullname": "sqlglot.executor.table.Table.add_columns", "modulename": "sqlglot.executor.table", "qualname": "Table.add_columns", "kind": "function", "doc": "

\n", "signature": "(self, *columns: str) -> None:", "funcdef": "def"}, "sqlglot.executor.table.Table.append": {"fullname": "sqlglot.executor.table.Table.append", "modulename": "sqlglot.executor.table", "qualname": "Table.append", "kind": "function", "doc": "

\n", "signature": "(self, row):", "funcdef": "def"}, "sqlglot.executor.table.Table.pop": {"fullname": "sqlglot.executor.table.Table.pop", "modulename": "sqlglot.executor.table", "qualname": "Table.pop", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.executor.table.TableIter": {"fullname": "sqlglot.executor.table.TableIter", "modulename": "sqlglot.executor.table", "qualname": "TableIter", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.TableIter.__init__": {"fullname": "sqlglot.executor.table.TableIter.__init__", "modulename": "sqlglot.executor.table", "qualname": "TableIter.__init__", "kind": "function", "doc": "

\n", "signature": "(table)"}, "sqlglot.executor.table.RangeReader": {"fullname": "sqlglot.executor.table.RangeReader", "modulename": "sqlglot.executor.table", "qualname": "RangeReader", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.RangeReader.__init__": {"fullname": "sqlglot.executor.table.RangeReader.__init__", "modulename": "sqlglot.executor.table", "qualname": "RangeReader.__init__", "kind": "function", "doc": "

\n", "signature": "(table)"}, "sqlglot.executor.table.RowReader": {"fullname": "sqlglot.executor.table.RowReader", "modulename": "sqlglot.executor.table", "qualname": "RowReader", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.RowReader.__init__": {"fullname": "sqlglot.executor.table.RowReader.__init__", "modulename": "sqlglot.executor.table", "qualname": "RowReader.__init__", "kind": "function", "doc": "

\n", "signature": "(columns, column_range=None)"}, "sqlglot.executor.table.Tables": {"fullname": "sqlglot.executor.table.Tables", "modulename": "sqlglot.executor.table", "qualname": "Tables", "kind": "class", "doc": "

Abstract base class for generic types.

\n\n

A generic type is typically declared by inheriting from\nthis class parameterized with one or more type variables.\nFor example, a generic mapping type might be defined as::

\n\n

class Mapping(Generic[KT, VT]):\n def __getitem__(self, key: KT) -> VT:\n ...\n # Etc.

\n\n

This class can then be used as follows::

\n\n

def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:\n try:\n return mapping[key]\n except KeyError:\n return default

\n", "bases": "sqlglot.schema.AbstractMappingSchema[sqlglot.executor.table.Table]"}, "sqlglot.executor.table.ensure_tables": {"fullname": "sqlglot.executor.table.ensure_tables", "modulename": "sqlglot.executor.table", "qualname": "ensure_tables", "kind": "function", "doc": "

\n", "signature": "(d: Optional[Dict]) -> sqlglot.executor.table.Tables:", "funcdef": "def"}, "sqlglot.expressions": {"fullname": "sqlglot.expressions", "modulename": "sqlglot.expressions", "kind": "module", "doc": "

Expressions

\n\n

Every AST node in SQLGlot is represented by a subclass of Expression.

\n\n

This module contains the implementation of all supported Expression types. Additionally,\nit exposes a number of helper functions, which are mainly used to programmatically build\nSQL expressions, such as sqlglot.expressions.select.

\n\n
\n"}, "sqlglot.expressions.Expression": {"fullname": "sqlglot.expressions.Expression", "modulename": "sqlglot.expressions", "qualname": "Expression", "kind": "class", "doc": "

The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary\ncontext, such as its child expressions, their names (arg keys), and whether a given child expression\nis optional or not.

\n\n
Attributes:
\n\n
    \n
  • key: a unique key for each class in the Expression hierarchy. This is useful for hashing\nand representing expressions as strings.
  • \n
  • arg_types: determines what arguments (child nodes) are supported by an expression. It\nmaps arg keys to booleans that indicate whether the corresponding args are optional.
  • \n
  • parent: a reference to the parent expression (or None, in case of root expressions).
  • \n
  • arg_key: the arg key an expression is associated with, i.e. the name its parent expression\nuses to refer to it.
  • \n
  • comments: a list of comments that are associated with a given expression. This is used in\norder to preserve comments when transpiling SQL code.
  • \n
  • _type: the sqlglot.expressions.DataType type of an expression. This is inferred by the\noptimizer, in order to enable some transformations that require type information.
  • \n
\n\n
Example:
\n\n
\n
\n
>>> class Foo(Expression):\n...     arg_types = {"this": True, "expression": False}\n
\n
\n \n

The above definition informs us that Foo is an Expression that requires an argument called\n \"this\" and may also optionally receive an argument called \"expression\".

\n
\n\n
Arguments:
\n\n
    \n
  • args: a mapping used for retrieving the arguments of an expression, given their arg keys.
  • \n
\n"}, "sqlglot.expressions.Expression.__init__": {"fullname": "sqlglot.expressions.Expression.__init__", "modulename": "sqlglot.expressions", "qualname": "Expression.__init__", "kind": "function", "doc": "

\n", "signature": "(**args: Any)"}, "sqlglot.expressions.Expression.this": {"fullname": "sqlglot.expressions.Expression.this", "modulename": "sqlglot.expressions", "qualname": "Expression.this", "kind": "variable", "doc": "

Retrieves the argument with key \"this\".

\n"}, "sqlglot.expressions.Expression.expression": {"fullname": "sqlglot.expressions.Expression.expression", "modulename": "sqlglot.expressions", "qualname": "Expression.expression", "kind": "variable", "doc": "

Retrieves the argument with key \"expression\".

\n"}, "sqlglot.expressions.Expression.expressions": {"fullname": "sqlglot.expressions.Expression.expressions", "modulename": "sqlglot.expressions", "qualname": "Expression.expressions", "kind": "variable", "doc": "

Retrieves the argument with key \"expressions\".

\n"}, "sqlglot.expressions.Expression.text": {"fullname": "sqlglot.expressions.Expression.text", "modulename": "sqlglot.expressions", "qualname": "Expression.text", "kind": "function", "doc": "

Returns a textual representation of the argument corresponding to \"key\". This can only be used\nfor args that are strings or leaf Expression instances, such as identifiers and literals.

\n", "signature": "(self, key) -> str:", "funcdef": "def"}, "sqlglot.expressions.Expression.is_string": {"fullname": "sqlglot.expressions.Expression.is_string", "modulename": "sqlglot.expressions", "qualname": "Expression.is_string", "kind": "variable", "doc": "

Checks whether a Literal expression is a string.

\n", "annotation": ": bool"}, "sqlglot.expressions.Expression.is_number": {"fullname": "sqlglot.expressions.Expression.is_number", "modulename": "sqlglot.expressions", "qualname": "Expression.is_number", "kind": "variable", "doc": "

Checks whether a Literal expression is a number.

\n", "annotation": ": bool"}, "sqlglot.expressions.Expression.is_int": {"fullname": "sqlglot.expressions.Expression.is_int", "modulename": "sqlglot.expressions", "qualname": "Expression.is_int", "kind": "variable", "doc": "

Checks whether a Literal expression is an integer.

\n", "annotation": ": bool"}, "sqlglot.expressions.Expression.is_star": {"fullname": "sqlglot.expressions.Expression.is_star", "modulename": "sqlglot.expressions", "qualname": "Expression.is_star", "kind": "variable", "doc": "

Checks whether an expression is a star.

\n", "annotation": ": bool"}, "sqlglot.expressions.Expression.alias": {"fullname": "sqlglot.expressions.Expression.alias", "modulename": "sqlglot.expressions", "qualname": "Expression.alias", "kind": "variable", "doc": "

Returns the alias of the expression, or an empty string if it's not aliased.

\n", "annotation": ": str"}, "sqlglot.expressions.Expression.output_name": {"fullname": "sqlglot.expressions.Expression.output_name", "modulename": "sqlglot.expressions", "qualname": "Expression.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Expression.copy": {"fullname": "sqlglot.expressions.Expression.copy", "modulename": "sqlglot.expressions", "qualname": "Expression.copy", "kind": "function", "doc": "

Returns a deep copy of the expression.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.add_comments": {"fullname": "sqlglot.expressions.Expression.add_comments", "modulename": "sqlglot.expressions", "qualname": "Expression.add_comments", "kind": "function", "doc": "

\n", "signature": "(self, comments: Optional[List[str]]) -> None:", "funcdef": "def"}, "sqlglot.expressions.Expression.append": {"fullname": "sqlglot.expressions.Expression.append", "modulename": "sqlglot.expressions", "qualname": "Expression.append", "kind": "function", "doc": "

Appends value to arg_key if it's a list or sets it as a new list.

\n\n
Arguments:
\n\n
    \n
  • arg_key (str): name of the list expression arg
  • \n
  • value (Any): value to append to the list
  • \n
\n", "signature": "(self, arg_key: str, value: Any) -> None:", "funcdef": "def"}, "sqlglot.expressions.Expression.set": {"fullname": "sqlglot.expressions.Expression.set", "modulename": "sqlglot.expressions", "qualname": "Expression.set", "kind": "function", "doc": "

Sets arg_key to value.

\n\n
Arguments:
\n\n
    \n
  • arg_key (str): name of the expression arg.
  • \n
  • value: value to set the arg to.
  • \n
\n", "signature": "(self, arg_key: str, value: Any) -> None:", "funcdef": "def"}, "sqlglot.expressions.Expression.depth": {"fullname": "sqlglot.expressions.Expression.depth", "modulename": "sqlglot.expressions", "qualname": "Expression.depth", "kind": "variable", "doc": "

Returns the depth of this tree.

\n", "annotation": ": int"}, "sqlglot.expressions.Expression.iter_expressions": {"fullname": "sqlglot.expressions.Expression.iter_expressions", "modulename": "sqlglot.expressions", "qualname": "Expression.iter_expressions", "kind": "function", "doc": "

Yields the key and expression for all arguments, exploding list args.

\n", "signature": "(self) -> Iterator[Tuple[str, sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.expressions.Expression.find": {"fullname": "sqlglot.expressions.Expression.find", "modulename": "sqlglot.expressions", "qualname": "Expression.find", "kind": "function", "doc": "

Returns the first node in this tree which matches at least one of\nthe specified types.

\n\n
Arguments:
\n\n
    \n
  • expression_types: the expression type(s) to match.
  • \n
  • bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
  • \n
\n\n
Returns:
\n\n
\n

The node which matches the criteria or None if no such node was found.

\n
\n", "signature": "(self, *expression_types: Type[~E], bfs: bool = True) -> Optional[~E]:", "funcdef": "def"}, "sqlglot.expressions.Expression.find_all": {"fullname": "sqlglot.expressions.Expression.find_all", "modulename": "sqlglot.expressions", "qualname": "Expression.find_all", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree and only\nyields those that match at least one of the specified expression types.

\n\n
Arguments:
\n\n
    \n
  • expression_types: the expression type(s) to match.
  • \n
  • bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
  • \n
\n\n
Returns:
\n\n
\n

The generator object.

\n
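A brief usage sketch:

from sqlglot import exp, parse_one

for column in parse_one("SELECT a, b + 1 AS c FROM d").find_all(exp.Column):
    print(column.alias_or_name)
# a
# b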
\n", "signature": "(self, *expression_types: Type[~E], bfs: bool = True) -> Iterator[~E]:", "funcdef": "def"}, "sqlglot.expressions.Expression.find_ancestor": {"fullname": "sqlglot.expressions.Expression.find_ancestor", "modulename": "sqlglot.expressions", "qualname": "Expression.find_ancestor", "kind": "function", "doc": "

Returns a nearest parent matching expression_types.

\n\n
Arguments:
\n\n
    \n
  • expression_types: the expression type(s) to match.
  • \n
\n\n
Returns:
\n\n
\n

The parent node.

\n
\n", "signature": "(self, *expression_types: Type[~E]) -> Optional[~E]:", "funcdef": "def"}, "sqlglot.expressions.Expression.parent_select": {"fullname": "sqlglot.expressions.Expression.parent_select", "modulename": "sqlglot.expressions", "qualname": "Expression.parent_select", "kind": "variable", "doc": "

Returns the parent select statement.

\n", "annotation": ": Optional[sqlglot.expressions.Select]"}, "sqlglot.expressions.Expression.same_parent": {"fullname": "sqlglot.expressions.Expression.same_parent", "modulename": "sqlglot.expressions", "qualname": "Expression.same_parent", "kind": "variable", "doc": "

Returns if the parent is the same class as itself.

\n", "annotation": ": bool"}, "sqlglot.expressions.Expression.root": {"fullname": "sqlglot.expressions.Expression.root", "modulename": "sqlglot.expressions", "qualname": "Expression.root", "kind": "function", "doc": "

Returns the root expression of this tree.

\n", "signature": "(self) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.Expression.walk": {"fullname": "sqlglot.expressions.Expression.walk", "modulename": "sqlglot.expressions", "qualname": "Expression.walk", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree.

\n\n
Arguments:
\n\n
    \n
  • bfs (bool): if set to True the BFS traversal order will be applied,\notherwise the DFS traversal will be used instead.
  • \n
  • prune ((node, parent, arg_key) -> bool): callable that returns True if\nthe generator should stop traversing this branch of the tree.
  • \n
\n\n
Returns:
\n\n
\n

the generator object.

\n
\n", "signature": "(self, bfs=True, prune=None):", "funcdef": "def"}, "sqlglot.expressions.Expression.dfs": {"fullname": "sqlglot.expressions.Expression.dfs", "modulename": "sqlglot.expressions", "qualname": "Expression.dfs", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree in\nthe DFS (Depth-first) order.

\n\n
Returns:
\n\n
\n

The generator object.

\n
\n", "signature": "(self, parent=None, key=None, prune=None):", "funcdef": "def"}, "sqlglot.expressions.Expression.bfs": {"fullname": "sqlglot.expressions.Expression.bfs", "modulename": "sqlglot.expressions", "qualname": "Expression.bfs", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree in\nthe BFS (Breadth-first) order.

\n\n
Returns:
\n\n
\n

The generator object.

\n
\n", "signature": "(self, prune=None):", "funcdef": "def"}, "sqlglot.expressions.Expression.unnest": {"fullname": "sqlglot.expressions.Expression.unnest", "modulename": "sqlglot.expressions", "qualname": "Expression.unnest", "kind": "function", "doc": "

Returns the first non parenthesis child or self.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.unalias": {"fullname": "sqlglot.expressions.Expression.unalias", "modulename": "sqlglot.expressions", "qualname": "Expression.unalias", "kind": "function", "doc": "

Returns the inner expression if this is an Alias.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.unnest_operands": {"fullname": "sqlglot.expressions.Expression.unnest_operands", "modulename": "sqlglot.expressions", "qualname": "Expression.unnest_operands", "kind": "function", "doc": "

Returns unnested operands as a tuple.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.flatten": {"fullname": "sqlglot.expressions.Expression.flatten", "modulename": "sqlglot.expressions", "qualname": "Expression.flatten", "kind": "function", "doc": "

Returns a generator which yields child nodes whose parents are the same class.

\n\n

A AND B AND C -> [A, B, C]

\n", "signature": "(self, unnest=True):", "funcdef": "def"}, "sqlglot.expressions.Expression.sql": {"fullname": "sqlglot.expressions.Expression.sql", "modulename": "sqlglot.expressions", "qualname": "Expression.sql", "kind": "function", "doc": "

Returns SQL string representation of this tree.

\n\n
Arguments:
\n\n
    \n
  • dialect: the dialect of the output SQL string (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • opts: other sqlglot.generator.Generator options.
  • \n
\n\n
Returns:
\n\n
\n

The SQL string.

\n
\n", "signature": "(\tself,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> str:", "funcdef": "def"}, "sqlglot.expressions.Expression.transform": {"fullname": "sqlglot.expressions.Expression.transform", "modulename": "sqlglot.expressions", "qualname": "Expression.transform", "kind": "function", "doc": "

Recursively visits all tree nodes (excluding already transformed ones)\nand applies the given transformation function to each node.

\n\n
Arguments:
\n\n
    \n
  • fun (function): a function which takes a node as an argument and returns a\nnew transformed node or the same node without modifications. If the function\nreturns None, then the corresponding node will be removed from the syntax tree.
  • \n
  • copy (bool): if set to True a new tree instance is constructed, otherwise the tree is\nmodified in place.
  • \n
\n\n
Returns:
\n\n
\n

The transformed tree.

\n
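A short sketch of a transformation pass, adapted from typical SQLGlot usage; FUN is a made-up function name:

from sqlglot import exp, parse_one

def rewrite(node):
    # Replace every column named "a" with a call to the made-up function FUN.
    if isinstance(node, exp.Column) and node.name == "a":
        return parse_one("FUN(a)")
    return node

print(parse_one("SELECT a FROM x").transform(rewrite).sql())  # SELECT FUN(a) FROM x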
\n", "signature": "(self, fun, *args, copy=True, **kwargs):", "funcdef": "def"}, "sqlglot.expressions.Expression.replace": {"fullname": "sqlglot.expressions.Expression.replace", "modulename": "sqlglot.expressions", "qualname": "Expression.replace", "kind": "function", "doc": "

Swap out this expression with a new expression.

\n\n

For example::

\n\n
>>> tree = Select().select(\"x\").from_(\"tbl\")\n>>> tree.find(Column).replace(Column(this=\"y\"))\n(COLUMN this: y)\n>>> tree.sql()\n'SELECT y FROM tbl'\n
\n\n
Arguments:
\n\n
    \n
  • expression: new node
  • \n
\n\n
Returns:
\n\n
\n

The new expression or expressions.

\n
\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.expressions.Expression.pop": {"fullname": "sqlglot.expressions.Expression.pop", "modulename": "sqlglot.expressions", "qualname": "Expression.pop", "kind": "function", "doc": "

Remove this expression from its AST.

\n\n
Returns:
\n\n
\n

The popped expression.

\n
\n", "signature": "(self: ~E) -> ~E:", "funcdef": "def"}, "sqlglot.expressions.Expression.assert_is": {"fullname": "sqlglot.expressions.Expression.assert_is", "modulename": "sqlglot.expressions", "qualname": "Expression.assert_is", "kind": "function", "doc": "

Assert that this Expression is an instance of type_.

\n\n

If it is NOT an instance of type_, this raises an assertion error.\nOtherwise, this returns this expression.

\n\n
Examples:
\n\n
\n

This is useful for type security in chained expressions:

\n \n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()\n'SELECT x, z FROM y'\n
\n
\n
\n", "signature": "(self, type_: Type[~E]) -> ~E:", "funcdef": "def"}, "sqlglot.expressions.Expression.error_messages": {"fullname": "sqlglot.expressions.Expression.error_messages", "modulename": "sqlglot.expressions", "qualname": "Expression.error_messages", "kind": "function", "doc": "

Checks if this expression is valid (e.g. all mandatory args are set).

\n\n
Arguments:
\n\n
    \n
  • args: a sequence of values that were used to instantiate a Func expression. This is used\nto check that the provided arguments don't exceed the function argument limit.
  • \n
\n\n
Returns:
\n\n
\n

A list of error messages for all possible errors that were found.

\n
\n", "signature": "(self, args: Optional[Sequence] = None) -> List[str]:", "funcdef": "def"}, "sqlglot.expressions.Expression.dump": {"fullname": "sqlglot.expressions.Expression.dump", "modulename": "sqlglot.expressions", "qualname": "Expression.dump", "kind": "function", "doc": "

Dump this Expression to a JSON-serializable dict.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.load": {"fullname": "sqlglot.expressions.Expression.load", "modulename": "sqlglot.expressions", "qualname": "Expression.load", "kind": "function", "doc": "

Load a dict (as returned by Expression.dump) into an Expression instance.

\n", "signature": "(cls, obj):", "funcdef": "def"}, "sqlglot.expressions.Condition": {"fullname": "sqlglot.expressions.Condition", "modulename": "sqlglot.expressions", "qualname": "Condition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Condition.and_": {"fullname": "sqlglot.expressions.Condition.and_", "modulename": "sqlglot.expressions", "qualname": "Condition.and_", "kind": "function", "doc": "

AND this condition with one or multiple expressions.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").and_("y=1").sql()\n'x = 1 AND y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: whether or not to copy the involved expressions (only applies to Expressions).
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new And condition.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.Condition.or_": {"fullname": "sqlglot.expressions.Condition.or_", "modulename": "sqlglot.expressions", "qualname": "Condition.or_", "kind": "function", "doc": "

OR this condition with one or multiple expressions.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").or_("y=1").sql()\n'x = 1 OR y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: whether or not to copy the involved expressions (only applies to Expressions).
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Or condition.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.Condition.not_": {"fullname": "sqlglot.expressions.Condition.not_", "modulename": "sqlglot.expressions", "qualname": "Condition.not_", "kind": "function", "doc": "

Wrap this condition with NOT.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").not_().sql()\n'NOT x = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • copy: whether or not to copy this object.
  • \n
\n\n
Returns:
\n\n
\n

The new Not instance.

\n
\n", "signature": "(self, copy: bool = True):", "funcdef": "def"}, "sqlglot.expressions.Condition.as_": {"fullname": "sqlglot.expressions.Condition.as_", "modulename": "sqlglot.expressions", "qualname": "Condition.as_", "kind": "function", "doc": "

\n", "signature": "(\tself,\talias: str | sqlglot.expressions.Identifier,\tquoted: Optional[bool] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Alias:", "funcdef": "def"}, "sqlglot.expressions.Condition.isin": {"fullname": "sqlglot.expressions.Condition.isin", "modulename": "sqlglot.expressions", "qualname": "Condition.isin", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*expressions: Any,\tquery: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.In:", "funcdef": "def"}, "sqlglot.expressions.Condition.between": {"fullname": "sqlglot.expressions.Condition.between", "modulename": "sqlglot.expressions", "qualname": "Condition.between", "kind": "function", "doc": "

\n", "signature": "(\tself,\tlow: Any,\thigh: Any,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Between:", "funcdef": "def"}, "sqlglot.expressions.Condition.is_": {"fullname": "sqlglot.expressions.Condition.is_", "modulename": "sqlglot.expressions", "qualname": "Condition.is_", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: Union[str, sqlglot.expressions.Expression]) -> sqlglot.expressions.Is:", "funcdef": "def"}, "sqlglot.expressions.Condition.like": {"fullname": "sqlglot.expressions.Condition.like", "modulename": "sqlglot.expressions", "qualname": "Condition.like", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: Union[str, sqlglot.expressions.Expression]) -> sqlglot.expressions.Like:", "funcdef": "def"}, "sqlglot.expressions.Condition.ilike": {"fullname": "sqlglot.expressions.Condition.ilike", "modulename": "sqlglot.expressions", "qualname": "Condition.ilike", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: Union[str, sqlglot.expressions.Expression]) -> sqlglot.expressions.ILike:", "funcdef": "def"}, "sqlglot.expressions.Condition.eq": {"fullname": "sqlglot.expressions.Condition.eq", "modulename": "sqlglot.expressions", "qualname": "Condition.eq", "kind": "function", "doc": "

\n", "signature": "(self, other: Any) -> sqlglot.expressions.EQ:", "funcdef": "def"}, "sqlglot.expressions.Condition.neq": {"fullname": "sqlglot.expressions.Condition.neq", "modulename": "sqlglot.expressions", "qualname": "Condition.neq", "kind": "function", "doc": "

\n", "signature": "(self, other: Any) -> sqlglot.expressions.NEQ:", "funcdef": "def"}, "sqlglot.expressions.Condition.rlike": {"fullname": "sqlglot.expressions.Condition.rlike", "modulename": "sqlglot.expressions", "qualname": "Condition.rlike", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: Union[str, sqlglot.expressions.Expression]) -> sqlglot.expressions.RegexpLike:", "funcdef": "def"}, "sqlglot.expressions.Predicate": {"fullname": "sqlglot.expressions.Predicate", "modulename": "sqlglot.expressions", "qualname": "Predicate", "kind": "class", "doc": "

Relationships like x = y, x > 1, x >= y.

\n", "bases": "Condition"}, "sqlglot.expressions.DerivedTable": {"fullname": "sqlglot.expressions.DerivedTable", "modulename": "sqlglot.expressions", "qualname": "DerivedTable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Unionable": {"fullname": "sqlglot.expressions.Unionable", "modulename": "sqlglot.expressions", "qualname": "Unionable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Unionable.union": {"fullname": "sqlglot.expressions.Unionable.union", "modulename": "sqlglot.expressions", "qualname": "Unionable.union", "kind": "function", "doc": "

Builds a UNION expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT * FROM foo").union("SELECT * FROM bla").sql()\n'SELECT * FROM foo UNION SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Union expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Unionable:", "funcdef": "def"}, "sqlglot.expressions.Unionable.intersect": {"fullname": "sqlglot.expressions.Unionable.intersect", "modulename": "sqlglot.expressions", "qualname": "Unionable.intersect", "kind": "function", "doc": "

Builds an INTERSECT expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT * FROM foo").intersect("SELECT * FROM bla").sql()\n'SELECT * FROM foo INTERSECT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Intersect expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Unionable:", "funcdef": "def"}, "sqlglot.expressions.Unionable.except_": {"fullname": "sqlglot.expressions.Unionable.except_", "modulename": "sqlglot.expressions", "qualname": "Unionable.except_", "kind": "function", "doc": "

Builds an EXCEPT expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT * FROM foo").except_("SELECT * FROM bla").sql()\n'SELECT * FROM foo EXCEPT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Except expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Unionable:", "funcdef": "def"}, "sqlglot.expressions.UDTF": {"fullname": "sqlglot.expressions.UDTF", "modulename": "sqlglot.expressions", "qualname": "UDTF", "kind": "class", "doc": "

\n", "bases": "DerivedTable, Unionable"}, "sqlglot.expressions.Cache": {"fullname": "sqlglot.expressions.Cache", "modulename": "sqlglot.expressions", "qualname": "Cache", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Uncache": {"fullname": "sqlglot.expressions.Uncache", "modulename": "sqlglot.expressions", "qualname": "Uncache", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Create": {"fullname": "sqlglot.expressions.Create", "modulename": "sqlglot.expressions", "qualname": "Create", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Clone": {"fullname": "sqlglot.expressions.Clone", "modulename": "sqlglot.expressions", "qualname": "Clone", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Describe": {"fullname": "sqlglot.expressions.Describe", "modulename": "sqlglot.expressions", "qualname": "Describe", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Pragma": {"fullname": "sqlglot.expressions.Pragma", "modulename": "sqlglot.expressions", "qualname": "Pragma", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Set": {"fullname": "sqlglot.expressions.Set", "modulename": "sqlglot.expressions", "qualname": "Set", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SetItem": {"fullname": "sqlglot.expressions.SetItem", "modulename": "sqlglot.expressions", "qualname": "SetItem", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Show": {"fullname": "sqlglot.expressions.Show", "modulename": "sqlglot.expressions", "qualname": "Show", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.UserDefinedFunction": {"fullname": "sqlglot.expressions.UserDefinedFunction", "modulename": "sqlglot.expressions", "qualname": "UserDefinedFunction", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.CharacterSet": {"fullname": "sqlglot.expressions.CharacterSet", "modulename": "sqlglot.expressions", "qualname": "CharacterSet", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.With": {"fullname": "sqlglot.expressions.With", "modulename": "sqlglot.expressions", "qualname": "With", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.WithinGroup": {"fullname": "sqlglot.expressions.WithinGroup", "modulename": "sqlglot.expressions", "qualname": "WithinGroup", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.CTE": {"fullname": "sqlglot.expressions.CTE", "modulename": "sqlglot.expressions", "qualname": "CTE", "kind": "class", "doc": "

\n", "bases": "DerivedTable"}, "sqlglot.expressions.TableAlias": {"fullname": "sqlglot.expressions.TableAlias", "modulename": "sqlglot.expressions", "qualname": "TableAlias", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.BitString": {"fullname": "sqlglot.expressions.BitString", "modulename": "sqlglot.expressions", "qualname": "BitString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.HexString": {"fullname": "sqlglot.expressions.HexString", "modulename": "sqlglot.expressions", "qualname": "HexString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.ByteString": {"fullname": "sqlglot.expressions.ByteString", "modulename": "sqlglot.expressions", "qualname": "ByteString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.RawString": {"fullname": "sqlglot.expressions.RawString", "modulename": "sqlglot.expressions", "qualname": "RawString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Column": {"fullname": "sqlglot.expressions.Column", "modulename": "sqlglot.expressions", "qualname": "Column", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Column.output_name": {"fullname": "sqlglot.expressions.Column.output_name", "modulename": "sqlglot.expressions", "qualname": "Column.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

If the Expression has no output name, an empty string is returned.

Example:
    >>> from sqlglot import parse_one
    >>> parse_one("SELECT a").expressions[0].output_name
    'a'
    >>> parse_one("SELECT b AS c").expressions[0].output_name
    'c'
    >>> parse_one("SELECT 1 + 2").expressions[0].output_name
    ''
\n", "annotation": ": str"}, "sqlglot.expressions.Column.parts": {"fullname": "sqlglot.expressions.Column.parts", "modulename": "sqlglot.expressions", "qualname": "Column.parts", "kind": "variable", "doc": "

Return the parts of a column in the order: catalog, db, table, name.

\n", "annotation": ": List[sqlglot.expressions.Identifier]"}, "sqlglot.expressions.Column.to_dot": {"fullname": "sqlglot.expressions.Column.to_dot", "modulename": "sqlglot.expressions", "qualname": "Column.to_dot", "kind": "function", "doc": "

Converts the column into a dot expression.
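Editor's sketch of parts and to_dot used together; the dotted column below is made up and the printed values are assumptions.

    from sqlglot import exp, parse_one

    col = parse_one("SELECT db.tbl.col FROM db.tbl").find(exp.Column)
    print([part.name for part in col.parts])  # assumed: ['db', 'tbl', 'col']
    print(col.to_dot().sql())                 # assumed: db.tbl.col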

\n", "signature": "(self) -> sqlglot.expressions.Dot:", "funcdef": "def"}, "sqlglot.expressions.ColumnPosition": {"fullname": "sqlglot.expressions.ColumnPosition", "modulename": "sqlglot.expressions", "qualname": "ColumnPosition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ColumnDef": {"fullname": "sqlglot.expressions.ColumnDef", "modulename": "sqlglot.expressions", "qualname": "ColumnDef", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AlterColumn": {"fullname": "sqlglot.expressions.AlterColumn", "modulename": "sqlglot.expressions", "qualname": "AlterColumn", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.RenameTable": {"fullname": "sqlglot.expressions.RenameTable", "modulename": "sqlglot.expressions", "qualname": "RenameTable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SetTag": {"fullname": "sqlglot.expressions.SetTag", "modulename": "sqlglot.expressions", "qualname": "SetTag", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Comment": {"fullname": "sqlglot.expressions.Comment", "modulename": "sqlglot.expressions", "qualname": "Comment", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.MergeTreeTTLAction": {"fullname": "sqlglot.expressions.MergeTreeTTLAction", "modulename": "sqlglot.expressions", "qualname": "MergeTreeTTLAction", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.MergeTreeTTL": {"fullname": "sqlglot.expressions.MergeTreeTTL", "modulename": "sqlglot.expressions", "qualname": "MergeTreeTTL", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ColumnConstraint": {"fullname": "sqlglot.expressions.ColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "ColumnConstraint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ColumnConstraintKind": {"fullname": "sqlglot.expressions.ColumnConstraintKind", "modulename": "sqlglot.expressions", "qualname": "ColumnConstraintKind", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"fullname": "sqlglot.expressions.AutoIncrementColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "AutoIncrementColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"fullname": "sqlglot.expressions.CaseSpecificColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CaseSpecificColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CharacterSetColumnConstraint": {"fullname": "sqlglot.expressions.CharacterSetColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CharacterSetColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CheckColumnConstraint": {"fullname": "sqlglot.expressions.CheckColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CheckColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CollateColumnConstraint": {"fullname": "sqlglot.expressions.CollateColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CollateColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CommentColumnConstraint": {"fullname": "sqlglot.expressions.CommentColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CommentColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CompressColumnConstraint": {"fullname": "sqlglot.expressions.CompressColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CompressColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.DateFormatColumnConstraint": {"fullname": "sqlglot.expressions.DateFormatColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "DateFormatColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.DefaultColumnConstraint": {"fullname": "sqlglot.expressions.DefaultColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "DefaultColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.EncodeColumnConstraint": {"fullname": "sqlglot.expressions.EncodeColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "EncodeColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"fullname": "sqlglot.expressions.GeneratedAsIdentityColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "GeneratedAsIdentityColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.InlineLengthColumnConstraint": {"fullname": "sqlglot.expressions.InlineLengthColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "InlineLengthColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.NotNullColumnConstraint": {"fullname": "sqlglot.expressions.NotNullColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "NotNullColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.OnUpdateColumnConstraint": {"fullname": "sqlglot.expressions.OnUpdateColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "OnUpdateColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"fullname": "sqlglot.expressions.PrimaryKeyColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "PrimaryKeyColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.TitleColumnConstraint": {"fullname": "sqlglot.expressions.TitleColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "TitleColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.UniqueColumnConstraint": {"fullname": "sqlglot.expressions.UniqueColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "UniqueColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.UppercaseColumnConstraint": {"fullname": "sqlglot.expressions.UppercaseColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "UppercaseColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.PathColumnConstraint": {"fullname": "sqlglot.expressions.PathColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "PathColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.Constraint": {"fullname": "sqlglot.expressions.Constraint", "modulename": "sqlglot.expressions", "qualname": "Constraint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Delete": {"fullname": "sqlglot.expressions.Delete", "modulename": "sqlglot.expressions", "qualname": "Delete", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Delete.delete": {"fullname": "sqlglot.expressions.Delete.delete", "modulename": "sqlglot.expressions", "qualname": "Delete.delete", "kind": "function", "doc": "

Create a DELETE expression or replace the table on an existing DELETE expression.

Example:
    >>> delete("tbl").sql()
    'DELETE FROM tbl'

Arguments:
  • table: the table from which to delete.
  • dialect: the dialect used to parse the input expression.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    Delete: the modified expression.
\n", "signature": "(\tself,\ttable: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Delete:", "funcdef": "def"}, "sqlglot.expressions.Delete.where": {"fullname": "sqlglot.expressions.Delete.where", "modulename": "sqlglot.expressions", "qualname": "Delete.where", "kind": "function", "doc": "

Append to or set the WHERE expressions.

Example:
    >>> delete("tbl").where("x = 'a' OR x < 'b'").sql()
    "DELETE FROM tbl WHERE x = 'a' OR x < 'b'"

Arguments:
  • *expressions: the SQL code strings to parse. If an Expression instance is passed, it will be used as-is. Multiple expressions are combined with an AND operator.
  • append: if True, AND the new expressions to any existing expression. Otherwise, this resets the expression.
  • dialect: the dialect used to parse the input expressions.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    Delete: the modified expression.
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Delete:", "funcdef": "def"}, "sqlglot.expressions.Delete.returning": {"fullname": "sqlglot.expressions.Delete.returning", "modulename": "sqlglot.expressions", "qualname": "Delete.returning", "kind": "function", "doc": "

Set the RETURNING expression. Not supported by all dialects.

Example:
    >>> delete("tbl").returning("*", dialect="postgres").sql()
    'DELETE FROM tbl RETURNING *'

Arguments:
  • expression: the SQL code string to parse. If an Expression instance is passed, it will be used as-is.
  • dialect: the dialect used to parse the input expressions.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    Delete: the modified expression.
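Editor's sketch chaining the three DELETE builders above; the table and column names are made up and the rendered SQL is an assumption.

    from sqlglot.expressions import delete

    stmt = (
        delete("users")
        .where("signup_date < '2020-01-01'")
        .returning("*", dialect="postgres")
    )
    print(stmt.sql(dialect="postgres"))
    # assumed: DELETE FROM users WHERE signup_date < '2020-01-01' RETURNING *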
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Delete:", "funcdef": "def"}, "sqlglot.expressions.Drop": {"fullname": "sqlglot.expressions.Drop", "modulename": "sqlglot.expressions", "qualname": "Drop", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Filter": {"fullname": "sqlglot.expressions.Filter", "modulename": "sqlglot.expressions", "qualname": "Filter", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Check": {"fullname": "sqlglot.expressions.Check", "modulename": "sqlglot.expressions", "qualname": "Check", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Directory": {"fullname": "sqlglot.expressions.Directory", "modulename": "sqlglot.expressions", "qualname": "Directory", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ForeignKey": {"fullname": "sqlglot.expressions.ForeignKey", "modulename": "sqlglot.expressions", "qualname": "ForeignKey", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.PrimaryKey": {"fullname": "sqlglot.expressions.PrimaryKey", "modulename": "sqlglot.expressions", "qualname": "PrimaryKey", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Into": {"fullname": "sqlglot.expressions.Into", "modulename": "sqlglot.expressions", "qualname": "Into", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.From": {"fullname": "sqlglot.expressions.From", "modulename": "sqlglot.expressions", "qualname": "From", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Having": {"fullname": "sqlglot.expressions.Having", "modulename": "sqlglot.expressions", "qualname": "Having", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Hint": {"fullname": "sqlglot.expressions.Hint", "modulename": "sqlglot.expressions", "qualname": "Hint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.JoinHint": {"fullname": "sqlglot.expressions.JoinHint", "modulename": "sqlglot.expressions", "qualname": "JoinHint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Identifier": {"fullname": "sqlglot.expressions.Identifier", "modulename": "sqlglot.expressions", "qualname": "Identifier", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Identifier.output_name": {"fullname": "sqlglot.expressions.Identifier.output_name", "modulename": "sqlglot.expressions", "qualname": "Identifier.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

If the Expression has no output name, an empty string is returned.

Example:
    >>> from sqlglot import parse_one
    >>> parse_one("SELECT a").expressions[0].output_name
    'a'
    >>> parse_one("SELECT b AS c").expressions[0].output_name
    'c'
    >>> parse_one("SELECT 1 + 2").expressions[0].output_name
    ''
\n", "annotation": ": str"}, "sqlglot.expressions.Index": {"fullname": "sqlglot.expressions.Index", "modulename": "sqlglot.expressions", "qualname": "Index", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Insert": {"fullname": "sqlglot.expressions.Insert", "modulename": "sqlglot.expressions", "qualname": "Insert", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Insert.with_": {"fullname": "sqlglot.expressions.Insert.with_", "modulename": "sqlglot.expressions", "qualname": "Insert.with_", "kind": "function", "doc": "

Append to or set the common table expressions.

Example:
    >>> insert("SELECT x FROM cte", "t").with_("cte", as_="SELECT * FROM tbl").sql()
    'WITH cte AS (SELECT * FROM tbl) INSERT INTO t SELECT x FROM cte'

Arguments:
  • alias: the SQL code string to parse as the table name. If an Expression instance is passed, this is used as-is.
  • as_: the SQL code string to parse as the table expression. If an Expression instance is passed, it will be used as-is.
  • recursive: set the RECURSIVE part of the expression. Defaults to False.
  • append: if True, add to any existing expressions. Otherwise, this resets the expressions.
  • dialect: the dialect used to parse the input expression.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    The modified expression.
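Editor's sketch of chaining several CTEs with the default append=True; the combined output is an assumption.

    from sqlglot.expressions import insert

    stmt = (
        insert("SELECT x FROM cte2", "t")
        .with_("cte1", as_="SELECT * FROM tbl")
        .with_("cte2", as_="SELECT x FROM cte1")  # append=True keeps cte1
    )
    print(stmt.sql())
    # assumed: WITH cte1 AS (SELECT * FROM tbl), cte2 AS (SELECT x FROM cte1) INSERT INTO t SELECT x FROM cte2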
\n", "signature": "(\tself,\talias: Union[str, sqlglot.expressions.Expression],\tas_: Union[str, sqlglot.expressions.Expression],\trecursive: Optional[bool] = None,\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Insert:", "funcdef": "def"}, "sqlglot.expressions.OnConflict": {"fullname": "sqlglot.expressions.OnConflict", "modulename": "sqlglot.expressions", "qualname": "OnConflict", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Returning": {"fullname": "sqlglot.expressions.Returning", "modulename": "sqlglot.expressions", "qualname": "Returning", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Introducer": {"fullname": "sqlglot.expressions.Introducer", "modulename": "sqlglot.expressions", "qualname": "Introducer", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.National": {"fullname": "sqlglot.expressions.National", "modulename": "sqlglot.expressions", "qualname": "National", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.LoadData": {"fullname": "sqlglot.expressions.LoadData", "modulename": "sqlglot.expressions", "qualname": "LoadData", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Partition": {"fullname": "sqlglot.expressions.Partition", "modulename": "sqlglot.expressions", "qualname": "Partition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Fetch": {"fullname": "sqlglot.expressions.Fetch", "modulename": "sqlglot.expressions", "qualname": "Fetch", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Group": {"fullname": "sqlglot.expressions.Group", "modulename": "sqlglot.expressions", "qualname": "Group", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Lambda": {"fullname": "sqlglot.expressions.Lambda", "modulename": "sqlglot.expressions", "qualname": "Lambda", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Limit": {"fullname": "sqlglot.expressions.Limit", "modulename": "sqlglot.expressions", "qualname": "Limit", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Literal": {"fullname": "sqlglot.expressions.Literal", "modulename": "sqlglot.expressions", "qualname": "Literal", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Literal.number": {"fullname": "sqlglot.expressions.Literal.number", "modulename": "sqlglot.expressions", "qualname": "Literal.number", "kind": "function", "doc": "

\n", "signature": "(cls, number) -> sqlglot.expressions.Literal:", "funcdef": "def"}, "sqlglot.expressions.Literal.string": {"fullname": "sqlglot.expressions.Literal.string", "modulename": "sqlglot.expressions", "qualname": "Literal.string", "kind": "function", "doc": "

\n", "signature": "(cls, string) -> sqlglot.expressions.Literal:", "funcdef": "def"}, "sqlglot.expressions.Literal.output_name": {"fullname": "sqlglot.expressions.Literal.output_name", "modulename": "sqlglot.expressions", "qualname": "Literal.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

If the Expression has no output name, an empty string is returned.

Example:
    >>> from sqlglot import parse_one
    >>> parse_one("SELECT a").expressions[0].output_name
    'a'
    >>> parse_one("SELECT b AS c").expressions[0].output_name
    'c'
    >>> parse_one("SELECT 1 + 2").expressions[0].output_name
    ''
\n", "annotation": ": str"}, "sqlglot.expressions.Join": {"fullname": "sqlglot.expressions.Join", "modulename": "sqlglot.expressions", "qualname": "Join", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Join.on": {"fullname": "sqlglot.expressions.Join.on", "modulename": "sqlglot.expressions", "qualname": "Join.on", "kind": "function", "doc": "

Append to or set the ON expressions.

Example:
    >>> import sqlglot
    >>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()
    'JOIN x ON y = 1'

Arguments:
  • *expressions: the SQL code strings to parse. If an Expression instance is passed, it will be used as-is. Multiple expressions are combined with an AND operator.
  • append: if True, AND the new expressions to any existing expression. Otherwise, this resets the expression.
  • dialect: the dialect used to parse the input expressions.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    The modified Join expression.
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Join:", "funcdef": "def"}, "sqlglot.expressions.Join.using": {"fullname": "sqlglot.expressions.Join.using", "modulename": "sqlglot.expressions", "qualname": "Join.using", "kind": "function", "doc": "

Append to or set the USING expressions.

Example:
    >>> import sqlglot
    >>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()
    'JOIN x USING (foo, bla)'

Arguments:
  • *expressions: the SQL code strings to parse. If an Expression instance is passed, it will be used as-is.
  • append: if True, concatenate the new expressions to the existing "using" list. Otherwise, this resets the expression.
  • dialect: the dialect used to parse the input expressions.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    The modified Join expression.
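Editor's sketch of repeated on calls ANDing conditions together; the join shown here is made up and the rendered SQL is an assumption.

    import sqlglot
    from sqlglot.expressions import Join

    join = (
        sqlglot.parse_one("JOIN orders", into=Join)
        .on("users.id = orders.user_id")
        .on("orders.status = 'open'")  # append=True ANDs onto the existing condition
    )
    print(join.sql())
    # assumed: JOIN orders ON users.id = orders.user_id AND orders.status = 'open'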
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Join:", "funcdef": "def"}, "sqlglot.expressions.Lateral": {"fullname": "sqlglot.expressions.Lateral", "modulename": "sqlglot.expressions", "qualname": "Lateral", "kind": "class", "doc": "

\n", "bases": "UDTF"}, "sqlglot.expressions.MatchRecognize": {"fullname": "sqlglot.expressions.MatchRecognize", "modulename": "sqlglot.expressions", "qualname": "MatchRecognize", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Final": {"fullname": "sqlglot.expressions.Final", "modulename": "sqlglot.expressions", "qualname": "Final", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Offset": {"fullname": "sqlglot.expressions.Offset", "modulename": "sqlglot.expressions", "qualname": "Offset", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Order": {"fullname": "sqlglot.expressions.Order", "modulename": "sqlglot.expressions", "qualname": "Order", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Cluster": {"fullname": "sqlglot.expressions.Cluster", "modulename": "sqlglot.expressions", "qualname": "Cluster", "kind": "class", "doc": "

\n", "bases": "Order"}, "sqlglot.expressions.Distribute": {"fullname": "sqlglot.expressions.Distribute", "modulename": "sqlglot.expressions", "qualname": "Distribute", "kind": "class", "doc": "

\n", "bases": "Order"}, "sqlglot.expressions.Sort": {"fullname": "sqlglot.expressions.Sort", "modulename": "sqlglot.expressions", "qualname": "Sort", "kind": "class", "doc": "

\n", "bases": "Order"}, "sqlglot.expressions.Ordered": {"fullname": "sqlglot.expressions.Ordered", "modulename": "sqlglot.expressions", "qualname": "Ordered", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Property": {"fullname": "sqlglot.expressions.Property", "modulename": "sqlglot.expressions", "qualname": "Property", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AlgorithmProperty": {"fullname": "sqlglot.expressions.AlgorithmProperty", "modulename": "sqlglot.expressions", "qualname": "AlgorithmProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.AutoIncrementProperty": {"fullname": "sqlglot.expressions.AutoIncrementProperty", "modulename": "sqlglot.expressions", "qualname": "AutoIncrementProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.BlockCompressionProperty": {"fullname": "sqlglot.expressions.BlockCompressionProperty", "modulename": "sqlglot.expressions", "qualname": "BlockCompressionProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.CharacterSetProperty": {"fullname": "sqlglot.expressions.CharacterSetProperty", "modulename": "sqlglot.expressions", "qualname": "CharacterSetProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ChecksumProperty": {"fullname": "sqlglot.expressions.ChecksumProperty", "modulename": "sqlglot.expressions", "qualname": "ChecksumProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.CollateProperty": {"fullname": "sqlglot.expressions.CollateProperty", "modulename": "sqlglot.expressions", "qualname": "CollateProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DataBlocksizeProperty": {"fullname": "sqlglot.expressions.DataBlocksizeProperty", "modulename": "sqlglot.expressions", "qualname": "DataBlocksizeProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DefinerProperty": {"fullname": "sqlglot.expressions.DefinerProperty", "modulename": "sqlglot.expressions", "qualname": "DefinerProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DistKeyProperty": {"fullname": "sqlglot.expressions.DistKeyProperty", "modulename": "sqlglot.expressions", "qualname": "DistKeyProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DistStyleProperty": {"fullname": "sqlglot.expressions.DistStyleProperty", "modulename": "sqlglot.expressions", "qualname": "DistStyleProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.EngineProperty": {"fullname": "sqlglot.expressions.EngineProperty", "modulename": "sqlglot.expressions", "qualname": "EngineProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ExecuteAsProperty": {"fullname": "sqlglot.expressions.ExecuteAsProperty", "modulename": "sqlglot.expressions", "qualname": "ExecuteAsProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ExternalProperty": {"fullname": "sqlglot.expressions.ExternalProperty", "modulename": "sqlglot.expressions", "qualname": "ExternalProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.FallbackProperty": {"fullname": "sqlglot.expressions.FallbackProperty", "modulename": "sqlglot.expressions", "qualname": "FallbackProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.FileFormatProperty": {"fullname": "sqlglot.expressions.FileFormatProperty", "modulename": "sqlglot.expressions", "qualname": "FileFormatProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.FreespaceProperty": {"fullname": "sqlglot.expressions.FreespaceProperty", "modulename": "sqlglot.expressions", "qualname": "FreespaceProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.InputOutputFormat": {"fullname": "sqlglot.expressions.InputOutputFormat", "modulename": "sqlglot.expressions", "qualname": "InputOutputFormat", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.IsolatedLoadingProperty": {"fullname": "sqlglot.expressions.IsolatedLoadingProperty", "modulename": "sqlglot.expressions", "qualname": "IsolatedLoadingProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.JournalProperty": {"fullname": "sqlglot.expressions.JournalProperty", "modulename": "sqlglot.expressions", "qualname": "JournalProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LanguageProperty": {"fullname": "sqlglot.expressions.LanguageProperty", "modulename": "sqlglot.expressions", "qualname": "LanguageProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DictProperty": {"fullname": "sqlglot.expressions.DictProperty", "modulename": "sqlglot.expressions", "qualname": "DictProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DictSubProperty": {"fullname": "sqlglot.expressions.DictSubProperty", "modulename": "sqlglot.expressions", "qualname": "DictSubProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DictRange": {"fullname": "sqlglot.expressions.DictRange", "modulename": "sqlglot.expressions", "qualname": "DictRange", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LikeProperty": {"fullname": "sqlglot.expressions.LikeProperty", "modulename": "sqlglot.expressions", "qualname": "LikeProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LocationProperty": {"fullname": "sqlglot.expressions.LocationProperty", "modulename": "sqlglot.expressions", "qualname": "LocationProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LockingProperty": {"fullname": "sqlglot.expressions.LockingProperty", "modulename": "sqlglot.expressions", "qualname": "LockingProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LogProperty": {"fullname": "sqlglot.expressions.LogProperty", "modulename": "sqlglot.expressions", "qualname": "LogProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.MaterializedProperty": {"fullname": "sqlglot.expressions.MaterializedProperty", "modulename": "sqlglot.expressions", "qualname": "MaterializedProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.MergeBlockRatioProperty": {"fullname": "sqlglot.expressions.MergeBlockRatioProperty", "modulename": "sqlglot.expressions", "qualname": "MergeBlockRatioProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.NoPrimaryIndexProperty": {"fullname": "sqlglot.expressions.NoPrimaryIndexProperty", "modulename": "sqlglot.expressions", "qualname": "NoPrimaryIndexProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.OnCommitProperty": {"fullname": "sqlglot.expressions.OnCommitProperty", "modulename": "sqlglot.expressions", "qualname": "OnCommitProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.PartitionedByProperty": {"fullname": "sqlglot.expressions.PartitionedByProperty", "modulename": "sqlglot.expressions", "qualname": "PartitionedByProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ReturnsProperty": {"fullname": "sqlglot.expressions.ReturnsProperty", "modulename": "sqlglot.expressions", "qualname": "ReturnsProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.RowFormatProperty": {"fullname": "sqlglot.expressions.RowFormatProperty", "modulename": "sqlglot.expressions", "qualname": "RowFormatProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.RowFormatDelimitedProperty": {"fullname": "sqlglot.expressions.RowFormatDelimitedProperty", "modulename": "sqlglot.expressions", "qualname": "RowFormatDelimitedProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.RowFormatSerdeProperty": {"fullname": "sqlglot.expressions.RowFormatSerdeProperty", "modulename": "sqlglot.expressions", "qualname": "RowFormatSerdeProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SchemaCommentProperty": {"fullname": "sqlglot.expressions.SchemaCommentProperty", "modulename": "sqlglot.expressions", "qualname": "SchemaCommentProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SerdeProperties": {"fullname": "sqlglot.expressions.SerdeProperties", "modulename": "sqlglot.expressions", "qualname": "SerdeProperties", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SetProperty": {"fullname": "sqlglot.expressions.SetProperty", "modulename": "sqlglot.expressions", "qualname": "SetProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SettingsProperty": {"fullname": "sqlglot.expressions.SettingsProperty", "modulename": "sqlglot.expressions", "qualname": "SettingsProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SortKeyProperty": {"fullname": "sqlglot.expressions.SortKeyProperty", "modulename": "sqlglot.expressions", "qualname": "SortKeyProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SqlSecurityProperty": {"fullname": "sqlglot.expressions.SqlSecurityProperty", "modulename": "sqlglot.expressions", "qualname": "SqlSecurityProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.StabilityProperty": {"fullname": "sqlglot.expressions.StabilityProperty", "modulename": "sqlglot.expressions", "qualname": "StabilityProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.TemporaryProperty": {"fullname": "sqlglot.expressions.TemporaryProperty", "modulename": "sqlglot.expressions", "qualname": "TemporaryProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.TransientProperty": {"fullname": "sqlglot.expressions.TransientProperty", "modulename": "sqlglot.expressions", "qualname": "TransientProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.VolatileProperty": {"fullname": "sqlglot.expressions.VolatileProperty", "modulename": "sqlglot.expressions", "qualname": "VolatileProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.WithDataProperty": {"fullname": "sqlglot.expressions.WithDataProperty", "modulename": "sqlglot.expressions", "qualname": "WithDataProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.WithJournalTableProperty": {"fullname": "sqlglot.expressions.WithJournalTableProperty", "modulename": "sqlglot.expressions", "qualname": "WithJournalTableProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.Properties": {"fullname": "sqlglot.expressions.Properties", "modulename": "sqlglot.expressions", "qualname": "Properties", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Properties.Location": {"fullname": "sqlglot.expressions.Properties.Location", "modulename": "sqlglot.expressions", "qualname": "Properties.Location", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"fullname": "sqlglot.expressions.Properties.Location.POST_CREATE", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_CREATE", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_CREATE: 'POST_CREATE'>"}, "sqlglot.expressions.Properties.Location.POST_NAME": {"fullname": "sqlglot.expressions.Properties.Location.POST_NAME", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_NAME", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_NAME: 'POST_NAME'>"}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"fullname": "sqlglot.expressions.Properties.Location.POST_SCHEMA", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_SCHEMA", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_SCHEMA: 'POST_SCHEMA'>"}, "sqlglot.expressions.Properties.Location.POST_WITH": {"fullname": "sqlglot.expressions.Properties.Location.POST_WITH", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_WITH", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_WITH: 'POST_WITH'>"}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"fullname": "sqlglot.expressions.Properties.Location.POST_ALIAS", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_ALIAS", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_ALIAS: 'POST_ALIAS'>"}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"fullname": "sqlglot.expressions.Properties.Location.POST_EXPRESSION", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_EXPRESSION", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_EXPRESSION: 'POST_EXPRESSION'>"}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"fullname": "sqlglot.expressions.Properties.Location.POST_INDEX", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_INDEX", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_INDEX: 'POST_INDEX'>"}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"fullname": "sqlglot.expressions.Properties.Location.UNSUPPORTED", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.UNSUPPORTED", "kind": "variable", "doc": "

\n", "default_value": "<Location.UNSUPPORTED: 'UNSUPPORTED'>"}, "sqlglot.expressions.Properties.from_dict": {"fullname": "sqlglot.expressions.Properties.from_dict", "modulename": "sqlglot.expressions", "qualname": "Properties.from_dict", "kind": "function", "doc": "

\n", "signature": "(cls, properties_dict: Dict) -> sqlglot.expressions.Properties:", "funcdef": "def"}, "sqlglot.expressions.Qualify": {"fullname": "sqlglot.expressions.Qualify", "modulename": "sqlglot.expressions", "qualname": "Qualify", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Return": {"fullname": "sqlglot.expressions.Return", "modulename": "sqlglot.expressions", "qualname": "Return", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Reference": {"fullname": "sqlglot.expressions.Reference", "modulename": "sqlglot.expressions", "qualname": "Reference", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Tuple": {"fullname": "sqlglot.expressions.Tuple", "modulename": "sqlglot.expressions", "qualname": "Tuple", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Tuple.isin": {"fullname": "sqlglot.expressions.Tuple.isin", "modulename": "sqlglot.expressions", "qualname": "Tuple.isin", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*expressions: Any,\tquery: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.In:", "funcdef": "def"}, "sqlglot.expressions.Subqueryable": {"fullname": "sqlglot.expressions.Subqueryable", "modulename": "sqlglot.expressions", "qualname": "Subqueryable", "kind": "class", "doc": "

\n", "bases": "Unionable"}, "sqlglot.expressions.Subqueryable.subquery": {"fullname": "sqlglot.expressions.Subqueryable.subquery", "modulename": "sqlglot.expressions", "qualname": "Subqueryable.subquery", "kind": "function", "doc": "

Convert this expression to an aliased expression that can be used as a Subquery.

Example:
    >>> subquery = Select().select("x").from_("tbl").subquery()
    >>> Select().select("x").from_(subquery).sql()
    'SELECT x FROM (SELECT x FROM tbl)'

Arguments:
  • alias (str | Identifier): an optional alias for the subquery
  • copy (bool): if False, modify this expression instance in-place.

Returns:
    Alias: the subquery
\n", "signature": "(\tself,\talias: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tcopy: bool = True) -> sqlglot.expressions.Subquery:", "funcdef": "def"}, "sqlglot.expressions.Subqueryable.limit": {"fullname": "sqlglot.expressions.Subqueryable.limit", "modulename": "sqlglot.expressions", "qualname": "Subqueryable.limit", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, int],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Subqueryable.with_": {"fullname": "sqlglot.expressions.Subqueryable.with_", "modulename": "sqlglot.expressions", "qualname": "Subqueryable.with_", "kind": "function", "doc": "

Append to or set the common table expressions.

Example:
    >>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()
    'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'

Arguments:
  • alias: the SQL code string to parse as the table name. If an Expression instance is passed, this is used as-is.
  • as_: the SQL code string to parse as the table expression. If an Expression instance is passed, it will be used as-is.
  • recursive: set the RECURSIVE part of the expression. Defaults to False.
  • append: if True, add to any existing expressions. Otherwise, this resets the expressions.
  • dialect: the dialect used to parse the input expression.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    The modified expression.
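Editor's sketch combining subquery with an alias and an outer select; the alias and rendered SQL are assumptions.

    from sqlglot.expressions import Select

    inner = Select().select("x").from_("tbl").subquery("t")
    print(Select().select("t.x").from_(inner).sql())
    # assumed: SELECT t.x FROM (SELECT x FROM tbl) AS t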
\n", "signature": "(\tself,\talias: Union[str, sqlglot.expressions.Expression],\tas_: Union[str, sqlglot.expressions.Expression],\trecursive: Optional[bool] = None,\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Subqueryable:", "funcdef": "def"}, "sqlglot.expressions.Table": {"fullname": "sqlglot.expressions.Table", "modulename": "sqlglot.expressions", "qualname": "Table", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Table.parts": {"fullname": "sqlglot.expressions.Table.parts", "modulename": "sqlglot.expressions", "qualname": "Table.parts", "kind": "variable", "doc": "

Return the parts of a table in the order: catalog, db, table.

\n", "annotation": ": List[sqlglot.expressions.Identifier]"}, "sqlglot.expressions.SystemTime": {"fullname": "sqlglot.expressions.SystemTime", "modulename": "sqlglot.expressions", "qualname": "SystemTime", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Union": {"fullname": "sqlglot.expressions.Union", "modulename": "sqlglot.expressions", "qualname": "Union", "kind": "class", "doc": "

\n", "bases": "Subqueryable"}, "sqlglot.expressions.Union.limit": {"fullname": "sqlglot.expressions.Union.limit", "modulename": "sqlglot.expressions", "qualname": "Union.limit", "kind": "function", "doc": "

Set the LIMIT expression.

Example:
    >>> select("1").union(select("1")).limit(1).sql()
    'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'

Arguments:
  • expression: the SQL code string to parse. This can also be an integer. If a Limit instance is passed, this is used as-is. If another Expression instance is passed, it will be wrapped in a Limit.
  • dialect: the dialect used to parse the input expression.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    The limited subqueryable.
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, int],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Union.select": {"fullname": "sqlglot.expressions.Union.select", "modulename": "sqlglot.expressions", "qualname": "Union.select", "kind": "function", "doc": "

Append to or set the SELECT of the union recursively.

Example:
    >>> from sqlglot import parse_one
    >>> parse_one("select a from x union select a from y union select a from z").select("b").sql()
    'SELECT a, b FROM x UNION SELECT a, b FROM y UNION SELECT a, b FROM z'

Arguments:
  • *expressions: the SQL code strings to parse. If an Expression instance is passed, it will be used as-is.
  • append: if True, add to any existing expressions. Otherwise, this resets the expressions.
  • dialect: the dialect used to parse the input expressions.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    Union: the modified expression.
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Union:", "funcdef": "def"}, "sqlglot.expressions.Union.is_star": {"fullname": "sqlglot.expressions.Union.is_star", "modulename": "sqlglot.expressions", "qualname": "Union.is_star", "kind": "variable", "doc": "

Checks whether an expression is a star.

\n", "annotation": ": bool"}, "sqlglot.expressions.Except": {"fullname": "sqlglot.expressions.Except", "modulename": "sqlglot.expressions", "qualname": "Except", "kind": "class", "doc": "

\n", "bases": "Union"}, "sqlglot.expressions.Intersect": {"fullname": "sqlglot.expressions.Intersect", "modulename": "sqlglot.expressions", "qualname": "Intersect", "kind": "class", "doc": "

\n", "bases": "Union"}, "sqlglot.expressions.Unnest": {"fullname": "sqlglot.expressions.Unnest", "modulename": "sqlglot.expressions", "qualname": "Unnest", "kind": "class", "doc": "

\n", "bases": "UDTF"}, "sqlglot.expressions.Update": {"fullname": "sqlglot.expressions.Update", "modulename": "sqlglot.expressions", "qualname": "Update", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Values": {"fullname": "sqlglot.expressions.Values", "modulename": "sqlglot.expressions", "qualname": "Values", "kind": "class", "doc": "

\n", "bases": "UDTF"}, "sqlglot.expressions.Var": {"fullname": "sqlglot.expressions.Var", "modulename": "sqlglot.expressions", "qualname": "Var", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Schema": {"fullname": "sqlglot.expressions.Schema", "modulename": "sqlglot.expressions", "qualname": "Schema", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Lock": {"fullname": "sqlglot.expressions.Lock", "modulename": "sqlglot.expressions", "qualname": "Lock", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Select": {"fullname": "sqlglot.expressions.Select", "modulename": "sqlglot.expressions", "qualname": "Select", "kind": "class", "doc": "

\n", "bases": "Subqueryable"}, "sqlglot.expressions.Select.from_": {"fullname": "sqlglot.expressions.Select.from_", "modulename": "sqlglot.expressions", "qualname": "Select.from_", "kind": "function", "doc": "

Set the FROM expression.

Example:
    >>> Select().from_("tbl").select("x").sql()
    'SELECT x FROM tbl'

Arguments:
  • expression: the SQL code string to parse. If a From instance is passed, this is used as-is. If another Expression instance is passed, it will be wrapped in a From.
  • dialect: the dialect used to parse the input expression.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    The modified Select expression.
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.group_by": {"fullname": "sqlglot.expressions.Select.group_by", "modulename": "sqlglot.expressions", "qualname": "Select.group_by", "kind": "function", "doc": "

Set the GROUP BY expression.

Example:
    >>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()
    'SELECT x, COUNT(1) FROM tbl GROUP BY x'

Arguments:
  • *expressions: the SQL code strings to parse. If a Group instance is passed, this is used as-is. If another Expression instance is passed, it will be wrapped in a Group. If nothing is passed in, then a GROUP BY is not applied to the expression.
  • append: if True, add to any existing expressions. Otherwise, this flattens all the Group expressions into a single expression.
  • dialect: the dialect used to parse the input expression.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    The modified Select expression.
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.order_by": {"fullname": "sqlglot.expressions.Select.order_by", "modulename": "sqlglot.expressions", "qualname": "Select.order_by", "kind": "function", "doc": "

Set the ORDER BY expression.

Example:
    >>> Select().from_("tbl").select("x").order_by("x DESC").sql()
    'SELECT x FROM tbl ORDER BY x DESC'

Arguments:
  • *expressions: the SQL code strings to parse. If an Order instance is passed, this is used as-is. If another Expression instance is passed, it will be wrapped in an Order.
  • append: if True, add to any existing expressions. Otherwise, this flattens all the Order expressions into a single expression.
  • dialect: the dialect used to parse the input expression.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    The modified Select expression.
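Editor's sketch chaining the Select builders above into one statement; the rendered SQL is an assumption.

    from sqlglot.expressions import Select

    query = (
        Select()
        .from_("tbl")
        .select("x", "COUNT(1) AS n")
        .group_by("x")
        .order_by("n DESC")
    )
    print(query.sql())
    # assumed: SELECT x, COUNT(1) AS n FROM tbl GROUP BY x ORDER BY n DESC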
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.sort_by": {"fullname": "sqlglot.expressions.Select.sort_by", "modulename": "sqlglot.expressions", "qualname": "Select.sort_by", "kind": "function", "doc": "

Set the SORT BY expression.

Example:
    >>> Select().from_("tbl").select("x").sort_by("x DESC").sql(dialect="hive")
    'SELECT x FROM tbl SORT BY x DESC'

Arguments:
  • *expressions: the SQL code strings to parse. If a Sort instance is passed, this is used as-is. If another Expression instance is passed, it will be wrapped in a Sort.
  • append: if True, add to any existing expressions. Otherwise, this flattens all the existing Sort expressions into a single expression.
  • dialect: the dialect used to parse the input expression.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    The modified Select expression.
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.cluster_by": {"fullname": "sqlglot.expressions.Select.cluster_by", "modulename": "sqlglot.expressions", "qualname": "Select.cluster_by", "kind": "function", "doc": "

Set the CLUSTER BY expression.

Example:
    >>> Select().from_("tbl").select("x").cluster_by("x DESC").sql(dialect="hive")
    'SELECT x FROM tbl CLUSTER BY x DESC'

Arguments:
  • *expressions: the SQL code strings to parse. If a Cluster instance is passed, this is used as-is. If another Expression instance is passed, it will be wrapped in a Cluster.
  • append: if True, add to any existing expressions. Otherwise, this flattens all the existing Cluster expressions into a single expression.
  • dialect: the dialect used to parse the input expression.
  • copy: if False, modify this expression instance in-place.
  • opts: other options to use to parse the input expressions.

Returns:
    The modified Select expression.
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.limit": {"fullname": "sqlglot.expressions.Select.limit", "modulename": "sqlglot.expressions", "qualname": "Select.limit", "kind": "function", "doc": "

Set the LIMIT expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").limit(10).sql()\n'SELECT x FROM tbl LIMIT 10'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nThis can also be an integer.\nIf a Limit instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Limit.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, int],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.offset": {"fullname": "sqlglot.expressions.Select.offset", "modulename": "sqlglot.expressions", "qualname": "Select.offset", "kind": "function", "doc": "

Set the OFFSET expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").offset(10).sql()\n'SELECT x FROM tbl OFFSET 10'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nThis can also be an integer.\nIf an Offset instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in an Offset.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
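As an illustrative sketch (not part of the original docstring), limit and offset can be chained on the same builder; with the default dialect the generated SQL should look roughly like:
>>> Select().from_("tbl").select("x").limit(10).offset(5).sql()\n'SELECT x FROM tbl LIMIT 10 OFFSET 5'\n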
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, int],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.select": {"fullname": "sqlglot.expressions.Select.select", "modulename": "sqlglot.expressions", "qualname": "Select.select", "kind": "function", "doc": "

Append to or set the SELECT expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x", "y").sql()\n'SELECT x, y'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
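A small illustrative sketch of the append flag (hypothetical example, not from the original docstring): passing append=False replaces any previously selected expressions instead of extending them.
>>> Select().select("x", "y").select("z", append=False).sql()\n'SELECT z'\n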
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.lateral": {"fullname": "sqlglot.expressions.Select.lateral", "modulename": "sqlglot.expressions", "qualname": "Select.lateral", "kind": "function", "doc": "

Append to or set the LATERAL expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()\n'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.join": {"fullname": "sqlglot.expressions.Select.join", "modulename": "sqlglot.expressions", "qualname": "Select.join", "kind": "function", "doc": "

Append to or set the JOIN expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()\n'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'\n
\n
\n \n
\n
>>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()\n'SELECT 1 FROM a JOIN b USING (x, y, z)'\n
\n
\n \n

Use join_type to change the type of join:

\n \n
\n
>>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()\n'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • on: optionally specify the join \"on\" criteria as a SQL string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • using: optionally specify the join \"using\" criteria as a SQL string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • join_type: if set, alter the parsed join type.
  • \n
  • join_alias: an optional alias for the joined source.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
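An illustrative sketch of the join_alias parameter (hypothetical example, not part of the original docstring); it aliases the joined source in the generated SQL:
>>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = t2.y", join_alias="t2").sql()\n'SELECT * FROM tbl JOIN tbl2 AS t2 ON tbl1.y = t2.y'\n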
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\ton: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tusing: Union[str, sqlglot.expressions.Expression, List[Union[str, sqlglot.expressions.Expression]], NoneType] = None,\tappend: bool = True,\tjoin_type: Optional[str] = None,\tjoin_alias: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.where": {"fullname": "sqlglot.expressions.Select.where", "modulename": "sqlglot.expressions", "qualname": "Select.where", "kind": "function", "doc": "

Append to or set the WHERE expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()\n"SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.\nMultiple expressions are combined with an AND operator.
  • \n
  • append: if True, AND the new expressions to any existing expression.\nOtherwise, this resets the expression.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
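An illustrative sketch (not from the original docstring) of passing several expressions at once, which are combined with AND as described above:
>>> Select().select("x").from_("tbl").where("x > 1", "x < 10").sql()\n'SELECT x FROM tbl WHERE x > 1 AND x < 10'\n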
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.having": {"fullname": "sqlglot.expressions.Select.having", "modulename": "sqlglot.expressions", "qualname": "Select.having", "kind": "function", "doc": "

Append to or set the HAVING expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()\n'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.\nMultiple expressions are combined with an AND operator.
  • \n
  • append: if True, AND the new expressions to any existing expression.\nOtherwise, this resets the expression.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.window": {"fullname": "sqlglot.expressions.Select.window", "modulename": "sqlglot.expressions", "qualname": "Select.window", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.qualify": {"fullname": "sqlglot.expressions.Select.qualify", "modulename": "sqlglot.expressions", "qualname": "Select.qualify", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.distinct": {"fullname": "sqlglot.expressions.Select.distinct", "modulename": "sqlglot.expressions", "qualname": "Select.distinct", "kind": "function", "doc": "

Set the DISTINCT expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").distinct().sql()\n'SELECT DISTINCT x FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • ons: the expressions to include in the DISTINCT ON clause
  • \n
  • distinct: whether the Select should be distinct
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
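An illustrative sketch of the ons parameter (hypothetical example, not from the original docstring): passing expressions emits a DISTINCT ON clause.
>>> Select().from_("tbl").select("x").distinct("y").sql()\n'SELECT DISTINCT ON (y) x FROM tbl'\n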
\n", "signature": "(\tself,\t*ons: Union[str, sqlglot.expressions.Expression, NoneType],\tdistinct: bool = True,\tcopy: bool = True) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.ctas": {"fullname": "sqlglot.expressions.Select.ctas", "modulename": "sqlglot.expressions", "qualname": "Select.ctas", "kind": "function", "doc": "

Convert this expression to a CREATE TABLE AS statement.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("*").from_("tbl").ctas("x").sql()\n'CREATE TABLE x AS SELECT * FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • table: the SQL code string to parse as the table name.\nIf another Expression instance is passed, it will be used as-is.
  • \n
  • properties: an optional mapping of table properties
  • \n
  • dialect: the dialect used to parse the input table.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input table.
  • \n
\n\n
Returns:
\n\n
\n

The new Create expression.

\n
\n", "signature": "(\tself,\ttable: Union[str, sqlglot.expressions.Expression],\tproperties: Optional[Dict] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Create:", "funcdef": "def"}, "sqlglot.expressions.Select.lock": {"fullname": "sqlglot.expressions.Select.lock", "modulename": "sqlglot.expressions", "qualname": "Select.lock", "kind": "function", "doc": "

Set the locking read mode for this expression.

\n\n
Examples:
\n\n
\n
\n
>>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")\n"SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"\n
\n
\n \n
\n
>>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")\n"SELECT x FROM tbl WHERE x = 'a' FOR SHARE"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • update: if True, the locking type will be FOR UPDATE, else it will be FOR SHARE.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
\n\n
Returns:
\n\n
\n

The modified expression.

\n
\n", "signature": "(\tself,\tupdate: bool = True,\tcopy: bool = True) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.is_star": {"fullname": "sqlglot.expressions.Select.is_star", "modulename": "sqlglot.expressions", "qualname": "Select.is_star", "kind": "variable", "doc": "

Checks whether an expression is a star.
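For illustration (hypothetical example, not part of the original docstring):
>>> from sqlglot import parse_one\n>>> parse_one("SELECT * FROM tbl").is_star\nTrue\n>>> parse_one("SELECT x FROM tbl").is_star\nFalse\n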

\n", "annotation": ": bool"}, "sqlglot.expressions.Subquery": {"fullname": "sqlglot.expressions.Subquery", "modulename": "sqlglot.expressions", "qualname": "Subquery", "kind": "class", "doc": "

\n", "bases": "DerivedTable, Unionable"}, "sqlglot.expressions.Subquery.unnest": {"fullname": "sqlglot.expressions.Subquery.unnest", "modulename": "sqlglot.expressions", "qualname": "Subquery.unnest", "kind": "function", "doc": "

Returns the first non-Subquery expression, unwrapping any nested subqueries.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Subquery.is_star": {"fullname": "sqlglot.expressions.Subquery.is_star", "modulename": "sqlglot.expressions", "qualname": "Subquery.is_star", "kind": "variable", "doc": "

Checks whether an expression is a star.

\n", "annotation": ": bool"}, "sqlglot.expressions.Subquery.output_name": {"fullname": "sqlglot.expressions.Subquery.output_name", "modulename": "sqlglot.expressions", "qualname": "Subquery.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.TableSample": {"fullname": "sqlglot.expressions.TableSample", "modulename": "sqlglot.expressions", "qualname": "TableSample", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Tag": {"fullname": "sqlglot.expressions.Tag", "modulename": "sqlglot.expressions", "qualname": "Tag", "kind": "class", "doc": "

Tags are used for generating arbitrary SQL like SELECT <span>x</span>.

\n", "bases": "Expression"}, "sqlglot.expressions.Pivot": {"fullname": "sqlglot.expressions.Pivot", "modulename": "sqlglot.expressions", "qualname": "Pivot", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Window": {"fullname": "sqlglot.expressions.Window", "modulename": "sqlglot.expressions", "qualname": "Window", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.WindowSpec": {"fullname": "sqlglot.expressions.WindowSpec", "modulename": "sqlglot.expressions", "qualname": "WindowSpec", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Where": {"fullname": "sqlglot.expressions.Where", "modulename": "sqlglot.expressions", "qualname": "Where", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Star": {"fullname": "sqlglot.expressions.Star", "modulename": "sqlglot.expressions", "qualname": "Star", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Star.output_name": {"fullname": "sqlglot.expressions.Star.output_name", "modulename": "sqlglot.expressions", "qualname": "Star.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Parameter": {"fullname": "sqlglot.expressions.Parameter", "modulename": "sqlglot.expressions", "qualname": "Parameter", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SessionParameter": {"fullname": "sqlglot.expressions.SessionParameter", "modulename": "sqlglot.expressions", "qualname": "SessionParameter", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Placeholder": {"fullname": "sqlglot.expressions.Placeholder", "modulename": "sqlglot.expressions", "qualname": "Placeholder", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Null": {"fullname": "sqlglot.expressions.Null", "modulename": "sqlglot.expressions", "qualname": "Null", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Boolean": {"fullname": "sqlglot.expressions.Boolean", "modulename": "sqlglot.expressions", "qualname": "Boolean", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.DataTypeSize": {"fullname": "sqlglot.expressions.DataTypeSize", "modulename": "sqlglot.expressions", "qualname": "DataTypeSize", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.DataType": {"fullname": "sqlglot.expressions.DataType", "modulename": "sqlglot.expressions", "qualname": "DataType", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.DataType.Type": {"fullname": "sqlglot.expressions.DataType.Type", "modulename": "sqlglot.expressions", "qualname": "DataType.Type", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.expressions.DataType.Type.ARRAY": {"fullname": "sqlglot.expressions.DataType.Type.ARRAY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.ARRAY", "kind": "variable", "doc": "

\n", "default_value": "<Type.ARRAY: 'ARRAY'>"}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"fullname": "sqlglot.expressions.DataType.Type.BIGDECIMAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BIGDECIMAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.BIGDECIMAL: 'BIGDECIMAL'>"}, "sqlglot.expressions.DataType.Type.BIGINT": {"fullname": "sqlglot.expressions.DataType.Type.BIGINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BIGINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.BIGINT: 'BIGINT'>"}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"fullname": "sqlglot.expressions.DataType.Type.BIGSERIAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BIGSERIAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.BIGSERIAL: 'BIGSERIAL'>"}, "sqlglot.expressions.DataType.Type.BINARY": {"fullname": "sqlglot.expressions.DataType.Type.BINARY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BINARY", "kind": "variable", "doc": "

\n", "default_value": "<Type.BINARY: 'BINARY'>"}, "sqlglot.expressions.DataType.Type.BIT": {"fullname": "sqlglot.expressions.DataType.Type.BIT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BIT", "kind": "variable", "doc": "

\n", "default_value": "<Type.BIT: 'BIT'>"}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"fullname": "sqlglot.expressions.DataType.Type.BOOLEAN", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BOOLEAN", "kind": "variable", "doc": "

\n", "default_value": "<Type.BOOLEAN: 'BOOLEAN'>"}, "sqlglot.expressions.DataType.Type.CHAR": {"fullname": "sqlglot.expressions.DataType.Type.CHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.CHAR", "kind": "variable", "doc": "

\n", "default_value": "<Type.CHAR: 'CHAR'>"}, "sqlglot.expressions.DataType.Type.DATE": {"fullname": "sqlglot.expressions.DataType.Type.DATE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATE", "kind": "variable", "doc": "

\n", "default_value": "<Type.DATE: 'DATE'>"}, "sqlglot.expressions.DataType.Type.DATETIME": {"fullname": "sqlglot.expressions.DataType.Type.DATETIME", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATETIME", "kind": "variable", "doc": "

\n", "default_value": "<Type.DATETIME: 'DATETIME'>"}, "sqlglot.expressions.DataType.Type.DATETIME64": {"fullname": "sqlglot.expressions.DataType.Type.DATETIME64", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATETIME64", "kind": "variable", "doc": "

\n", "default_value": "<Type.DATETIME64: 'DATETIME64'>"}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"fullname": "sqlglot.expressions.DataType.Type.INT4RANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT4RANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT4RANGE: 'INT4RANGE'>"}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.INT4MULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT4MULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT4MULTIRANGE: 'INT4MULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"fullname": "sqlglot.expressions.DataType.Type.INT8RANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT8RANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT8RANGE: 'INT8RANGE'>"}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.INT8MULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT8MULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT8MULTIRANGE: 'INT8MULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"fullname": "sqlglot.expressions.DataType.Type.NUMRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NUMRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.NUMRANGE: 'NUMRANGE'>"}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.NUMMULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NUMMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.NUMMULTIRANGE: 'NUMMULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.TSRANGE": {"fullname": "sqlglot.expressions.DataType.Type.TSRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TSRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.TSRANGE: 'TSRANGE'>"}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.TSMULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TSMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.TSMULTIRANGE: 'TSMULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"fullname": "sqlglot.expressions.DataType.Type.TSTZRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TSTZRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.TSTZRANGE: 'TSTZRANGE'>"}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TSTZMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.DATERANGE": {"fullname": "sqlglot.expressions.DataType.Type.DATERANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATERANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.DATERANGE: 'DATERANGE'>"}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.DATEMULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATEMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.DATEMULTIRANGE: 'DATEMULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.DECIMAL": {"fullname": "sqlglot.expressions.DataType.Type.DECIMAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DECIMAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.DECIMAL: 'DECIMAL'>"}, "sqlglot.expressions.DataType.Type.DOUBLE": {"fullname": "sqlglot.expressions.DataType.Type.DOUBLE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DOUBLE", "kind": "variable", "doc": "

\n", "default_value": "<Type.DOUBLE: 'DOUBLE'>"}, "sqlglot.expressions.DataType.Type.FLOAT": {"fullname": "sqlglot.expressions.DataType.Type.FLOAT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.FLOAT", "kind": "variable", "doc": "

\n", "default_value": "<Type.FLOAT: 'FLOAT'>"}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"fullname": "sqlglot.expressions.DataType.Type.GEOGRAPHY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.GEOGRAPHY", "kind": "variable", "doc": "

\n", "default_value": "<Type.GEOGRAPHY: 'GEOGRAPHY'>"}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"fullname": "sqlglot.expressions.DataType.Type.GEOMETRY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.GEOMETRY", "kind": "variable", "doc": "

\n", "default_value": "<Type.GEOMETRY: 'GEOMETRY'>"}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"fullname": "sqlglot.expressions.DataType.Type.HLLSKETCH", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.HLLSKETCH", "kind": "variable", "doc": "

\n", "default_value": "<Type.HLLSKETCH: 'HLLSKETCH'>"}, "sqlglot.expressions.DataType.Type.HSTORE": {"fullname": "sqlglot.expressions.DataType.Type.HSTORE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.HSTORE", "kind": "variable", "doc": "

\n", "default_value": "<Type.HSTORE: 'HSTORE'>"}, "sqlglot.expressions.DataType.Type.IMAGE": {"fullname": "sqlglot.expressions.DataType.Type.IMAGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.IMAGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.IMAGE: 'IMAGE'>"}, "sqlglot.expressions.DataType.Type.INET": {"fullname": "sqlglot.expressions.DataType.Type.INET", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INET", "kind": "variable", "doc": "

\n", "default_value": "<Type.INET: 'INET'>"}, "sqlglot.expressions.DataType.Type.INT": {"fullname": "sqlglot.expressions.DataType.Type.INT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT: 'INT'>"}, "sqlglot.expressions.DataType.Type.INT128": {"fullname": "sqlglot.expressions.DataType.Type.INT128", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT128", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT128: 'INT128'>"}, "sqlglot.expressions.DataType.Type.INT256": {"fullname": "sqlglot.expressions.DataType.Type.INT256", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT256", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT256: 'INT256'>"}, "sqlglot.expressions.DataType.Type.INTERVAL": {"fullname": "sqlglot.expressions.DataType.Type.INTERVAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INTERVAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.INTERVAL: 'INTERVAL'>"}, "sqlglot.expressions.DataType.Type.JSON": {"fullname": "sqlglot.expressions.DataType.Type.JSON", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.JSON", "kind": "variable", "doc": "

\n", "default_value": "<Type.JSON: 'JSON'>"}, "sqlglot.expressions.DataType.Type.JSONB": {"fullname": "sqlglot.expressions.DataType.Type.JSONB", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.JSONB", "kind": "variable", "doc": "

\n", "default_value": "<Type.JSONB: 'JSONB'>"}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"fullname": "sqlglot.expressions.DataType.Type.LONGBLOB", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.LONGBLOB", "kind": "variable", "doc": "

\n", "default_value": "<Type.LONGBLOB: 'LONGBLOB'>"}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"fullname": "sqlglot.expressions.DataType.Type.LONGTEXT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.LONGTEXT", "kind": "variable", "doc": "

\n", "default_value": "<Type.LONGTEXT: 'LONGTEXT'>"}, "sqlglot.expressions.DataType.Type.MAP": {"fullname": "sqlglot.expressions.DataType.Type.MAP", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MAP", "kind": "variable", "doc": "

\n", "default_value": "<Type.MAP: 'MAP'>"}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"fullname": "sqlglot.expressions.DataType.Type.MEDIUMBLOB", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MEDIUMBLOB", "kind": "variable", "doc": "

\n", "default_value": "<Type.MEDIUMBLOB: 'MEDIUMBLOB'>"}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"fullname": "sqlglot.expressions.DataType.Type.MEDIUMTEXT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MEDIUMTEXT", "kind": "variable", "doc": "

\n", "default_value": "<Type.MEDIUMTEXT: 'MEDIUMTEXT'>"}, "sqlglot.expressions.DataType.Type.MONEY": {"fullname": "sqlglot.expressions.DataType.Type.MONEY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MONEY", "kind": "variable", "doc": "

\n", "default_value": "<Type.MONEY: 'MONEY'>"}, "sqlglot.expressions.DataType.Type.NCHAR": {"fullname": "sqlglot.expressions.DataType.Type.NCHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NCHAR", "kind": "variable", "doc": "

\n", "default_value": "<Type.NCHAR: 'NCHAR'>"}, "sqlglot.expressions.DataType.Type.NULL": {"fullname": "sqlglot.expressions.DataType.Type.NULL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NULL", "kind": "variable", "doc": "

\n", "default_value": "<Type.NULL: 'NULL'>"}, "sqlglot.expressions.DataType.Type.NULLABLE": {"fullname": "sqlglot.expressions.DataType.Type.NULLABLE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NULLABLE", "kind": "variable", "doc": "

\n", "default_value": "<Type.NULLABLE: 'NULLABLE'>"}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"fullname": "sqlglot.expressions.DataType.Type.NVARCHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NVARCHAR", "kind": "variable", "doc": "

\n", "default_value": "<Type.NVARCHAR: 'NVARCHAR'>"}, "sqlglot.expressions.DataType.Type.OBJECT": {"fullname": "sqlglot.expressions.DataType.Type.OBJECT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.OBJECT", "kind": "variable", "doc": "

\n", "default_value": "<Type.OBJECT: 'OBJECT'>"}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"fullname": "sqlglot.expressions.DataType.Type.ROWVERSION", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.ROWVERSION", "kind": "variable", "doc": "

\n", "default_value": "<Type.ROWVERSION: 'ROWVERSION'>"}, "sqlglot.expressions.DataType.Type.SERIAL": {"fullname": "sqlglot.expressions.DataType.Type.SERIAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SERIAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.SERIAL: 'SERIAL'>"}, "sqlglot.expressions.DataType.Type.SMALLINT": {"fullname": "sqlglot.expressions.DataType.Type.SMALLINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SMALLINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.SMALLINT: 'SMALLINT'>"}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"fullname": "sqlglot.expressions.DataType.Type.SMALLMONEY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SMALLMONEY", "kind": "variable", "doc": "

\n", "default_value": "<Type.SMALLMONEY: 'SMALLMONEY'>"}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"fullname": "sqlglot.expressions.DataType.Type.SMALLSERIAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SMALLSERIAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.SMALLSERIAL: 'SMALLSERIAL'>"}, "sqlglot.expressions.DataType.Type.STRUCT": {"fullname": "sqlglot.expressions.DataType.Type.STRUCT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.STRUCT", "kind": "variable", "doc": "

\n", "default_value": "<Type.STRUCT: 'STRUCT'>"}, "sqlglot.expressions.DataType.Type.SUPER": {"fullname": "sqlglot.expressions.DataType.Type.SUPER", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SUPER", "kind": "variable", "doc": "

\n", "default_value": "<Type.SUPER: 'SUPER'>"}, "sqlglot.expressions.DataType.Type.TEXT": {"fullname": "sqlglot.expressions.DataType.Type.TEXT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TEXT", "kind": "variable", "doc": "

\n", "default_value": "<Type.TEXT: 'TEXT'>"}, "sqlglot.expressions.DataType.Type.TIME": {"fullname": "sqlglot.expressions.DataType.Type.TIME", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIME", "kind": "variable", "doc": "

\n", "default_value": "<Type.TIME: 'TIME'>"}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"fullname": "sqlglot.expressions.DataType.Type.TIMESTAMP", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIMESTAMP", "kind": "variable", "doc": "

\n", "default_value": "<Type.TIMESTAMP: 'TIMESTAMP'>"}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"fullname": "sqlglot.expressions.DataType.Type.TIMESTAMPTZ", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIMESTAMPTZ", "kind": "variable", "doc": "

\n", "default_value": "<Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>"}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"fullname": "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIMESTAMPLTZ", "kind": "variable", "doc": "

\n", "default_value": "<Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>"}, "sqlglot.expressions.DataType.Type.TINYINT": {"fullname": "sqlglot.expressions.DataType.Type.TINYINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TINYINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.TINYINT: 'TINYINT'>"}, "sqlglot.expressions.DataType.Type.UBIGINT": {"fullname": "sqlglot.expressions.DataType.Type.UBIGINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UBIGINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.UBIGINT: 'UBIGINT'>"}, "sqlglot.expressions.DataType.Type.UINT": {"fullname": "sqlglot.expressions.DataType.Type.UINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.UINT: 'UINT'>"}, "sqlglot.expressions.DataType.Type.USMALLINT": {"fullname": "sqlglot.expressions.DataType.Type.USMALLINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.USMALLINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.USMALLINT: 'USMALLINT'>"}, "sqlglot.expressions.DataType.Type.UTINYINT": {"fullname": "sqlglot.expressions.DataType.Type.UTINYINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UTINYINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.UTINYINT: 'UTINYINT'>"}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"fullname": "sqlglot.expressions.DataType.Type.UNKNOWN", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UNKNOWN", "kind": "variable", "doc": "

\n", "default_value": "<Type.UNKNOWN: 'UNKNOWN'>"}, "sqlglot.expressions.DataType.Type.UINT128": {"fullname": "sqlglot.expressions.DataType.Type.UINT128", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UINT128", "kind": "variable", "doc": "

\n", "default_value": "<Type.UINT128: 'UINT128'>"}, "sqlglot.expressions.DataType.Type.UINT256": {"fullname": "sqlglot.expressions.DataType.Type.UINT256", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UINT256", "kind": "variable", "doc": "

\n", "default_value": "<Type.UINT256: 'UINT256'>"}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"fullname": "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UNIQUEIDENTIFIER", "kind": "variable", "doc": "

\n", "default_value": "<Type.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>"}, "sqlglot.expressions.DataType.Type.UUID": {"fullname": "sqlglot.expressions.DataType.Type.UUID", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UUID", "kind": "variable", "doc": "

\n", "default_value": "<Type.UUID: 'UUID'>"}, "sqlglot.expressions.DataType.Type.VARBINARY": {"fullname": "sqlglot.expressions.DataType.Type.VARBINARY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.VARBINARY", "kind": "variable", "doc": "

\n", "default_value": "<Type.VARBINARY: 'VARBINARY'>"}, "sqlglot.expressions.DataType.Type.VARCHAR": {"fullname": "sqlglot.expressions.DataType.Type.VARCHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.VARCHAR", "kind": "variable", "doc": "

\n", "default_value": "<Type.VARCHAR: 'VARCHAR'>"}, "sqlglot.expressions.DataType.Type.VARIANT": {"fullname": "sqlglot.expressions.DataType.Type.VARIANT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.VARIANT", "kind": "variable", "doc": "

\n", "default_value": "<Type.VARIANT: 'VARIANT'>"}, "sqlglot.expressions.DataType.Type.XML": {"fullname": "sqlglot.expressions.DataType.Type.XML", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.XML", "kind": "variable", "doc": "

\n", "default_value": "<Type.XML: 'XML'>"}, "sqlglot.expressions.DataType.build": {"fullname": "sqlglot.expressions.DataType.build", "modulename": "sqlglot.expressions", "qualname": "DataType.build", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tdtype: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**kwargs) -> sqlglot.expressions.DataType:", "funcdef": "def"}, "sqlglot.expressions.DataType.is_type": {"fullname": "sqlglot.expressions.DataType.is_type", "modulename": "sqlglot.expressions", "qualname": "DataType.is_type", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*dtypes: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type) -> bool:", "funcdef": "def"}, "sqlglot.expressions.PseudoType": {"fullname": "sqlglot.expressions.PseudoType", "modulename": "sqlglot.expressions", "qualname": "PseudoType", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SubqueryPredicate": {"fullname": "sqlglot.expressions.SubqueryPredicate", "modulename": "sqlglot.expressions", "qualname": "SubqueryPredicate", "kind": "class", "doc": "

\n", "bases": "Predicate"}, "sqlglot.expressions.All": {"fullname": "sqlglot.expressions.All", "modulename": "sqlglot.expressions", "qualname": "All", "kind": "class", "doc": "

\n", "bases": "SubqueryPredicate"}, "sqlglot.expressions.Any": {"fullname": "sqlglot.expressions.Any", "modulename": "sqlglot.expressions", "qualname": "Any", "kind": "class", "doc": "

\n", "bases": "SubqueryPredicate"}, "sqlglot.expressions.Exists": {"fullname": "sqlglot.expressions.Exists", "modulename": "sqlglot.expressions", "qualname": "Exists", "kind": "class", "doc": "

\n", "bases": "SubqueryPredicate"}, "sqlglot.expressions.Command": {"fullname": "sqlglot.expressions.Command", "modulename": "sqlglot.expressions", "qualname": "Command", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Transaction": {"fullname": "sqlglot.expressions.Transaction", "modulename": "sqlglot.expressions", "qualname": "Transaction", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Commit": {"fullname": "sqlglot.expressions.Commit", "modulename": "sqlglot.expressions", "qualname": "Commit", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Rollback": {"fullname": "sqlglot.expressions.Rollback", "modulename": "sqlglot.expressions", "qualname": "Rollback", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AlterTable": {"fullname": "sqlglot.expressions.AlterTable", "modulename": "sqlglot.expressions", "qualname": "AlterTable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AddConstraint": {"fullname": "sqlglot.expressions.AddConstraint", "modulename": "sqlglot.expressions", "qualname": "AddConstraint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.DropPartition": {"fullname": "sqlglot.expressions.DropPartition", "modulename": "sqlglot.expressions", "qualname": "DropPartition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Binary": {"fullname": "sqlglot.expressions.Binary", "modulename": "sqlglot.expressions", "qualname": "Binary", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Add": {"fullname": "sqlglot.expressions.Add", "modulename": "sqlglot.expressions", "qualname": "Add", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Connector": {"fullname": "sqlglot.expressions.Connector", "modulename": "sqlglot.expressions", "qualname": "Connector", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.And": {"fullname": "sqlglot.expressions.And", "modulename": "sqlglot.expressions", "qualname": "And", "kind": "class", "doc": "

\n", "bases": "Connector"}, "sqlglot.expressions.Or": {"fullname": "sqlglot.expressions.Or", "modulename": "sqlglot.expressions", "qualname": "Or", "kind": "class", "doc": "

\n", "bases": "Connector"}, "sqlglot.expressions.BitwiseAnd": {"fullname": "sqlglot.expressions.BitwiseAnd", "modulename": "sqlglot.expressions", "qualname": "BitwiseAnd", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseLeftShift": {"fullname": "sqlglot.expressions.BitwiseLeftShift", "modulename": "sqlglot.expressions", "qualname": "BitwiseLeftShift", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseOr": {"fullname": "sqlglot.expressions.BitwiseOr", "modulename": "sqlglot.expressions", "qualname": "BitwiseOr", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseRightShift": {"fullname": "sqlglot.expressions.BitwiseRightShift", "modulename": "sqlglot.expressions", "qualname": "BitwiseRightShift", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseXor": {"fullname": "sqlglot.expressions.BitwiseXor", "modulename": "sqlglot.expressions", "qualname": "BitwiseXor", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Div": {"fullname": "sqlglot.expressions.Div", "modulename": "sqlglot.expressions", "qualname": "Div", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Overlaps": {"fullname": "sqlglot.expressions.Overlaps", "modulename": "sqlglot.expressions", "qualname": "Overlaps", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Dot": {"fullname": "sqlglot.expressions.Dot", "modulename": "sqlglot.expressions", "qualname": "Dot", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Dot.build": {"fullname": "sqlglot.expressions.Dot.build", "modulename": "sqlglot.expressions", "qualname": "Dot.build", "kind": "function", "doc": "

Build a Dot object with a sequence of expressions.
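A brief sketch based on the signature below (hypothetical example, not from the original docstring): a sequence of column expressions is folded into nested Dot nodes, which render as a dotted path.
>>> from sqlglot import expressions as exp\n>>> exp.Dot.build([exp.column("a"), exp.column("b"), exp.column("c")]).sql()\n'a.b.c'\n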

\n", "signature": "(\tself,\texpressions: Sequence[sqlglot.expressions.Expression]) -> sqlglot.expressions.Dot:", "funcdef": "def"}, "sqlglot.expressions.DPipe": {"fullname": "sqlglot.expressions.DPipe", "modulename": "sqlglot.expressions", "qualname": "DPipe", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.EQ": {"fullname": "sqlglot.expressions.EQ", "modulename": "sqlglot.expressions", "qualname": "EQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.NullSafeEQ": {"fullname": "sqlglot.expressions.NullSafeEQ", "modulename": "sqlglot.expressions", "qualname": "NullSafeEQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.NullSafeNEQ": {"fullname": "sqlglot.expressions.NullSafeNEQ", "modulename": "sqlglot.expressions", "qualname": "NullSafeNEQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Distance": {"fullname": "sqlglot.expressions.Distance", "modulename": "sqlglot.expressions", "qualname": "Distance", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Escape": {"fullname": "sqlglot.expressions.Escape", "modulename": "sqlglot.expressions", "qualname": "Escape", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Glob": {"fullname": "sqlglot.expressions.Glob", "modulename": "sqlglot.expressions", "qualname": "Glob", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.GT": {"fullname": "sqlglot.expressions.GT", "modulename": "sqlglot.expressions", "qualname": "GT", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.GTE": {"fullname": "sqlglot.expressions.GTE", "modulename": "sqlglot.expressions", "qualname": "GTE", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.ILike": {"fullname": "sqlglot.expressions.ILike", "modulename": "sqlglot.expressions", "qualname": "ILike", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.ILikeAny": {"fullname": "sqlglot.expressions.ILikeAny", "modulename": "sqlglot.expressions", "qualname": "ILikeAny", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.IntDiv": {"fullname": "sqlglot.expressions.IntDiv", "modulename": "sqlglot.expressions", "qualname": "IntDiv", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Is": {"fullname": "sqlglot.expressions.Is", "modulename": "sqlglot.expressions", "qualname": "Is", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Kwarg": {"fullname": "sqlglot.expressions.Kwarg", "modulename": "sqlglot.expressions", "qualname": "Kwarg", "kind": "class", "doc": "

Represents a keyword argument in special functions, e.g. func(kwarg => y).

\n", "bases": "Binary"}, "sqlglot.expressions.Like": {"fullname": "sqlglot.expressions.Like", "modulename": "sqlglot.expressions", "qualname": "Like", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.LikeAny": {"fullname": "sqlglot.expressions.LikeAny", "modulename": "sqlglot.expressions", "qualname": "LikeAny", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.LT": {"fullname": "sqlglot.expressions.LT", "modulename": "sqlglot.expressions", "qualname": "LT", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.LTE": {"fullname": "sqlglot.expressions.LTE", "modulename": "sqlglot.expressions", "qualname": "LTE", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Mod": {"fullname": "sqlglot.expressions.Mod", "modulename": "sqlglot.expressions", "qualname": "Mod", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Mul": {"fullname": "sqlglot.expressions.Mul", "modulename": "sqlglot.expressions", "qualname": "Mul", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.NEQ": {"fullname": "sqlglot.expressions.NEQ", "modulename": "sqlglot.expressions", "qualname": "NEQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.SimilarTo": {"fullname": "sqlglot.expressions.SimilarTo", "modulename": "sqlglot.expressions", "qualname": "SimilarTo", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Slice": {"fullname": "sqlglot.expressions.Slice", "modulename": "sqlglot.expressions", "qualname": "Slice", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Sub": {"fullname": "sqlglot.expressions.Sub", "modulename": "sqlglot.expressions", "qualname": "Sub", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.ArrayOverlaps": {"fullname": "sqlglot.expressions.ArrayOverlaps", "modulename": "sqlglot.expressions", "qualname": "ArrayOverlaps", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Unary": {"fullname": "sqlglot.expressions.Unary", "modulename": "sqlglot.expressions", "qualname": "Unary", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.BitwiseNot": {"fullname": "sqlglot.expressions.BitwiseNot", "modulename": "sqlglot.expressions", "qualname": "BitwiseNot", "kind": "class", "doc": "

\n", "bases": "Unary"}, "sqlglot.expressions.Not": {"fullname": "sqlglot.expressions.Not", "modulename": "sqlglot.expressions", "qualname": "Not", "kind": "class", "doc": "

\n", "bases": "Unary"}, "sqlglot.expressions.Paren": {"fullname": "sqlglot.expressions.Paren", "modulename": "sqlglot.expressions", "qualname": "Paren", "kind": "class", "doc": "

\n", "bases": "Unary"}, "sqlglot.expressions.Neg": {"fullname": "sqlglot.expressions.Neg", "modulename": "sqlglot.expressions", "qualname": "Neg", "kind": "class", "doc": "

\n", "bases": "Unary"}, "sqlglot.expressions.Alias": {"fullname": "sqlglot.expressions.Alias", "modulename": "sqlglot.expressions", "qualname": "Alias", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Alias.output_name": {"fullname": "sqlglot.expressions.Alias.output_name", "modulename": "sqlglot.expressions", "qualname": "Alias.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Aliases": {"fullname": "sqlglot.expressions.Aliases", "modulename": "sqlglot.expressions", "qualname": "Aliases", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AtTimeZone": {"fullname": "sqlglot.expressions.AtTimeZone", "modulename": "sqlglot.expressions", "qualname": "AtTimeZone", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Between": {"fullname": "sqlglot.expressions.Between", "modulename": "sqlglot.expressions", "qualname": "Between", "kind": "class", "doc": "

\n", "bases": "Predicate"}, "sqlglot.expressions.Bracket": {"fullname": "sqlglot.expressions.Bracket", "modulename": "sqlglot.expressions", "qualname": "Bracket", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Distinct": {"fullname": "sqlglot.expressions.Distinct", "modulename": "sqlglot.expressions", "qualname": "Distinct", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.In": {"fullname": "sqlglot.expressions.In", "modulename": "sqlglot.expressions", "qualname": "In", "kind": "class", "doc": "

\n", "bases": "Predicate"}, "sqlglot.expressions.TimeUnit": {"fullname": "sqlglot.expressions.TimeUnit", "modulename": "sqlglot.expressions", "qualname": "TimeUnit", "kind": "class", "doc": "

Automatically converts the unit argument into a Var.

\n", "bases": "Expression"}, "sqlglot.expressions.TimeUnit.__init__": {"fullname": "sqlglot.expressions.TimeUnit.__init__", "modulename": "sqlglot.expressions", "qualname": "TimeUnit.__init__", "kind": "function", "doc": "

\n", "signature": "(**args)"}, "sqlglot.expressions.Interval": {"fullname": "sqlglot.expressions.Interval", "modulename": "sqlglot.expressions", "qualname": "Interval", "kind": "class", "doc": "

\n", "bases": "TimeUnit"}, "sqlglot.expressions.IgnoreNulls": {"fullname": "sqlglot.expressions.IgnoreNulls", "modulename": "sqlglot.expressions", "qualname": "IgnoreNulls", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.RespectNulls": {"fullname": "sqlglot.expressions.RespectNulls", "modulename": "sqlglot.expressions", "qualname": "RespectNulls", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Func": {"fullname": "sqlglot.expressions.Func", "modulename": "sqlglot.expressions", "qualname": "Func", "kind": "class", "doc": "

The base class for all function expressions.

\n\n
Attributes:
\n\n
    \n
  • is_var_len_args (bool): if set to True, the last argument defined in arg_types will be\ntreated as a variable-length argument and the argument's value will be stored as a list.
  • \n
  • _sql_names (list): determines the SQL name (1st item in the list) and aliases (subsequent items)\nfor this function expression. These values are used to map this node to a name during parsing\nas well as to provide the function's name during SQL string generation. By default the SQL\nname is set to the expression's class name transformed to snake case (see the sketch after this list).
  • \n
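A short illustrative sketch of the default naming behavior described above (hypothetical example, not from the original docstring): when _sql_names is not set, the SQL name is derived from the class name.
>>> from sqlglot import expressions as exp\n>>> exp.ArrayAgg.sql_names()\n['ARRAY_AGG']\n>>> exp.ArrayAgg.sql_name()\n'ARRAY_AGG'\n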
\n", "bases": "Condition"}, "sqlglot.expressions.Func.from_arg_list": {"fullname": "sqlglot.expressions.Func.from_arg_list", "modulename": "sqlglot.expressions", "qualname": "Func.from_arg_list", "kind": "function", "doc": "

\n", "signature": "(cls, args):", "funcdef": "def"}, "sqlglot.expressions.Func.sql_names": {"fullname": "sqlglot.expressions.Func.sql_names", "modulename": "sqlglot.expressions", "qualname": "Func.sql_names", "kind": "function", "doc": "

\n", "signature": "(cls):", "funcdef": "def"}, "sqlglot.expressions.Func.sql_name": {"fullname": "sqlglot.expressions.Func.sql_name", "modulename": "sqlglot.expressions", "qualname": "Func.sql_name", "kind": "function", "doc": "

\n", "signature": "(cls):", "funcdef": "def"}, "sqlglot.expressions.Func.default_parser_mappings": {"fullname": "sqlglot.expressions.Func.default_parser_mappings", "modulename": "sqlglot.expressions", "qualname": "Func.default_parser_mappings", "kind": "function", "doc": "

\n", "signature": "(cls):", "funcdef": "def"}, "sqlglot.expressions.AggFunc": {"fullname": "sqlglot.expressions.AggFunc", "modulename": "sqlglot.expressions", "qualname": "AggFunc", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ParameterizedAgg": {"fullname": "sqlglot.expressions.ParameterizedAgg", "modulename": "sqlglot.expressions", "qualname": "ParameterizedAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Abs": {"fullname": "sqlglot.expressions.Abs", "modulename": "sqlglot.expressions", "qualname": "Abs", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Anonymous": {"fullname": "sqlglot.expressions.Anonymous", "modulename": "sqlglot.expressions", "qualname": "Anonymous", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Hll": {"fullname": "sqlglot.expressions.Hll", "modulename": "sqlglot.expressions", "qualname": "Hll", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.ApproxDistinct": {"fullname": "sqlglot.expressions.ApproxDistinct", "modulename": "sqlglot.expressions", "qualname": "ApproxDistinct", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Array": {"fullname": "sqlglot.expressions.Array", "modulename": "sqlglot.expressions", "qualname": "Array", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ToChar": {"fullname": "sqlglot.expressions.ToChar", "modulename": "sqlglot.expressions", "qualname": "ToChar", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.GenerateSeries": {"fullname": "sqlglot.expressions.GenerateSeries", "modulename": "sqlglot.expressions", "qualname": "GenerateSeries", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayAgg": {"fullname": "sqlglot.expressions.ArrayAgg", "modulename": "sqlglot.expressions", "qualname": "ArrayAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.ArrayAll": {"fullname": "sqlglot.expressions.ArrayAll", "modulename": "sqlglot.expressions", "qualname": "ArrayAll", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayAny": {"fullname": "sqlglot.expressions.ArrayAny", "modulename": "sqlglot.expressions", "qualname": "ArrayAny", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayConcat": {"fullname": "sqlglot.expressions.ArrayConcat", "modulename": "sqlglot.expressions", "qualname": "ArrayConcat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayContains": {"fullname": "sqlglot.expressions.ArrayContains", "modulename": "sqlglot.expressions", "qualname": "ArrayContains", "kind": "class", "doc": "

\n", "bases": "Binary, Func"}, "sqlglot.expressions.ArrayContained": {"fullname": "sqlglot.expressions.ArrayContained", "modulename": "sqlglot.expressions", "qualname": "ArrayContained", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.ArrayFilter": {"fullname": "sqlglot.expressions.ArrayFilter", "modulename": "sqlglot.expressions", "qualname": "ArrayFilter", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayJoin": {"fullname": "sqlglot.expressions.ArrayJoin", "modulename": "sqlglot.expressions", "qualname": "ArrayJoin", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArraySize": {"fullname": "sqlglot.expressions.ArraySize", "modulename": "sqlglot.expressions", "qualname": "ArraySize", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArraySort": {"fullname": "sqlglot.expressions.ArraySort", "modulename": "sqlglot.expressions", "qualname": "ArraySort", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArraySum": {"fullname": "sqlglot.expressions.ArraySum", "modulename": "sqlglot.expressions", "qualname": "ArraySum", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayUnionAgg": {"fullname": "sqlglot.expressions.ArrayUnionAgg", "modulename": "sqlglot.expressions", "qualname": "ArrayUnionAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Avg": {"fullname": "sqlglot.expressions.Avg", "modulename": "sqlglot.expressions", "qualname": "Avg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.AnyValue": {"fullname": "sqlglot.expressions.AnyValue", "modulename": "sqlglot.expressions", "qualname": "AnyValue", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Case": {"fullname": "sqlglot.expressions.Case", "modulename": "sqlglot.expressions", "qualname": "Case", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Case.when": {"fullname": "sqlglot.expressions.Case.when", "modulename": "sqlglot.expressions", "qualname": "Case.when", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcondition: Union[str, sqlglot.expressions.Expression],\tthen: Union[str, sqlglot.expressions.Expression],\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Case:", "funcdef": "def"}, "sqlglot.expressions.Case.else_": {"fullname": "sqlglot.expressions.Case.else_", "modulename": "sqlglot.expressions", "qualname": "Case.else_", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcondition: Union[str, sqlglot.expressions.Expression],\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Case:", "funcdef": "def"}, "sqlglot.expressions.Cast": {"fullname": "sqlglot.expressions.Cast", "modulename": "sqlglot.expressions", "qualname": "Cast", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Cast.output_name": {"fullname": "sqlglot.expressions.Cast.output_name", "modulename": "sqlglot.expressions", "qualname": "Cast.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Cast.is_type": {"fullname": "sqlglot.expressions.Cast.is_type", "modulename": "sqlglot.expressions", "qualname": "Cast.is_type", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*dtypes: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type) -> bool:", "funcdef": "def"}, "sqlglot.expressions.CastToStrType": {"fullname": "sqlglot.expressions.CastToStrType", "modulename": "sqlglot.expressions", "qualname": "CastToStrType", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Collate": {"fullname": "sqlglot.expressions.Collate", "modulename": "sqlglot.expressions", "qualname": "Collate", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.TryCast": {"fullname": "sqlglot.expressions.TryCast", "modulename": "sqlglot.expressions", "qualname": "TryCast", "kind": "class", "doc": "

\n", "bases": "Cast"}, "sqlglot.expressions.Ceil": {"fullname": "sqlglot.expressions.Ceil", "modulename": "sqlglot.expressions", "qualname": "Ceil", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Coalesce": {"fullname": "sqlglot.expressions.Coalesce", "modulename": "sqlglot.expressions", "qualname": "Coalesce", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Concat": {"fullname": "sqlglot.expressions.Concat", "modulename": "sqlglot.expressions", "qualname": "Concat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ConcatWs": {"fullname": "sqlglot.expressions.ConcatWs", "modulename": "sqlglot.expressions", "qualname": "ConcatWs", "kind": "class", "doc": "

\n", "bases": "Concat"}, "sqlglot.expressions.Count": {"fullname": "sqlglot.expressions.Count", "modulename": "sqlglot.expressions", "qualname": "Count", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.CountIf": {"fullname": "sqlglot.expressions.CountIf", "modulename": "sqlglot.expressions", "qualname": "CountIf", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.CurrentDate": {"fullname": "sqlglot.expressions.CurrentDate", "modulename": "sqlglot.expressions", "qualname": "CurrentDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentDatetime": {"fullname": "sqlglot.expressions.CurrentDatetime", "modulename": "sqlglot.expressions", "qualname": "CurrentDatetime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentTime": {"fullname": "sqlglot.expressions.CurrentTime", "modulename": "sqlglot.expressions", "qualname": "CurrentTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentTimestamp": {"fullname": "sqlglot.expressions.CurrentTimestamp", "modulename": "sqlglot.expressions", "qualname": "CurrentTimestamp", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentUser": {"fullname": "sqlglot.expressions.CurrentUser", "modulename": "sqlglot.expressions", "qualname": "CurrentUser", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateAdd": {"fullname": "sqlglot.expressions.DateAdd", "modulename": "sqlglot.expressions", "qualname": "DateAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateSub": {"fullname": "sqlglot.expressions.DateSub", "modulename": "sqlglot.expressions", "qualname": "DateSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateDiff": {"fullname": "sqlglot.expressions.DateDiff", "modulename": "sqlglot.expressions", "qualname": "DateDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateTrunc": {"fullname": "sqlglot.expressions.DateTrunc", "modulename": "sqlglot.expressions", "qualname": "DateTrunc", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DatetimeAdd": {"fullname": "sqlglot.expressions.DatetimeAdd", "modulename": "sqlglot.expressions", "qualname": "DatetimeAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DatetimeSub": {"fullname": "sqlglot.expressions.DatetimeSub", "modulename": "sqlglot.expressions", "qualname": "DatetimeSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DatetimeDiff": {"fullname": "sqlglot.expressions.DatetimeDiff", "modulename": "sqlglot.expressions", "qualname": "DatetimeDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DatetimeTrunc": {"fullname": "sqlglot.expressions.DatetimeTrunc", "modulename": "sqlglot.expressions", "qualname": "DatetimeTrunc", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DayOfWeek": {"fullname": "sqlglot.expressions.DayOfWeek", "modulename": "sqlglot.expressions", "qualname": "DayOfWeek", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DayOfMonth": {"fullname": "sqlglot.expressions.DayOfMonth", "modulename": "sqlglot.expressions", "qualname": "DayOfMonth", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DayOfYear": {"fullname": "sqlglot.expressions.DayOfYear", "modulename": "sqlglot.expressions", "qualname": "DayOfYear", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.WeekOfYear": {"fullname": "sqlglot.expressions.WeekOfYear", "modulename": "sqlglot.expressions", "qualname": "WeekOfYear", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.LastDateOfMonth": {"fullname": "sqlglot.expressions.LastDateOfMonth", "modulename": "sqlglot.expressions", "qualname": "LastDateOfMonth", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Extract": {"fullname": "sqlglot.expressions.Extract", "modulename": "sqlglot.expressions", "qualname": "Extract", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimestampAdd": {"fullname": "sqlglot.expressions.TimestampAdd", "modulename": "sqlglot.expressions", "qualname": "TimestampAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimestampSub": {"fullname": "sqlglot.expressions.TimestampSub", "modulename": "sqlglot.expressions", "qualname": "TimestampSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimestampDiff": {"fullname": "sqlglot.expressions.TimestampDiff", "modulename": "sqlglot.expressions", "qualname": "TimestampDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimestampTrunc": {"fullname": "sqlglot.expressions.TimestampTrunc", "modulename": "sqlglot.expressions", "qualname": "TimestampTrunc", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeAdd": {"fullname": "sqlglot.expressions.TimeAdd", "modulename": "sqlglot.expressions", "qualname": "TimeAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeSub": {"fullname": "sqlglot.expressions.TimeSub", "modulename": "sqlglot.expressions", "qualname": "TimeSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeDiff": {"fullname": "sqlglot.expressions.TimeDiff", "modulename": "sqlglot.expressions", "qualname": "TimeDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeTrunc": {"fullname": "sqlglot.expressions.TimeTrunc", "modulename": "sqlglot.expressions", "qualname": "TimeTrunc", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateFromParts": {"fullname": "sqlglot.expressions.DateFromParts", "modulename": "sqlglot.expressions", "qualname": "DateFromParts", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateStrToDate": {"fullname": "sqlglot.expressions.DateStrToDate", "modulename": "sqlglot.expressions", "qualname": "DateStrToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateToDateStr": {"fullname": "sqlglot.expressions.DateToDateStr", "modulename": "sqlglot.expressions", "qualname": "DateToDateStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateToDi": {"fullname": "sqlglot.expressions.DateToDi", "modulename": "sqlglot.expressions", "qualname": "DateToDi", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Day": {"fullname": "sqlglot.expressions.Day", "modulename": "sqlglot.expressions", "qualname": "Day", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Decode": {"fullname": "sqlglot.expressions.Decode", "modulename": "sqlglot.expressions", "qualname": "Decode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DiToDate": {"fullname": "sqlglot.expressions.DiToDate", "modulename": "sqlglot.expressions", "qualname": "DiToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Encode": {"fullname": "sqlglot.expressions.Encode", "modulename": "sqlglot.expressions", "qualname": "Encode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Exp": {"fullname": "sqlglot.expressions.Exp", "modulename": "sqlglot.expressions", "qualname": "Exp", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Explode": {"fullname": "sqlglot.expressions.Explode", "modulename": "sqlglot.expressions", "qualname": "Explode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Floor": {"fullname": "sqlglot.expressions.Floor", "modulename": "sqlglot.expressions", "qualname": "Floor", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.FromBase64": {"fullname": "sqlglot.expressions.FromBase64", "modulename": "sqlglot.expressions", "qualname": "FromBase64", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ToBase64": {"fullname": "sqlglot.expressions.ToBase64", "modulename": "sqlglot.expressions", "qualname": "ToBase64", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Greatest": {"fullname": "sqlglot.expressions.Greatest", "modulename": "sqlglot.expressions", "qualname": "Greatest", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.GroupConcat": {"fullname": "sqlglot.expressions.GroupConcat", "modulename": "sqlglot.expressions", "qualname": "GroupConcat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Hex": {"fullname": "sqlglot.expressions.Hex", "modulename": "sqlglot.expressions", "qualname": "Hex", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.If": {"fullname": "sqlglot.expressions.If", "modulename": "sqlglot.expressions", "qualname": "If", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.IfNull": {"fullname": "sqlglot.expressions.IfNull", "modulename": "sqlglot.expressions", "qualname": "IfNull", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Initcap": {"fullname": "sqlglot.expressions.Initcap", "modulename": "sqlglot.expressions", "qualname": "Initcap", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.JSONKeyValue": {"fullname": "sqlglot.expressions.JSONKeyValue", "modulename": "sqlglot.expressions", "qualname": "JSONKeyValue", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.JSONObject": {"fullname": "sqlglot.expressions.JSONObject", "modulename": "sqlglot.expressions", "qualname": "JSONObject", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.OpenJSONColumnDef": {"fullname": "sqlglot.expressions.OpenJSONColumnDef", "modulename": "sqlglot.expressions", "qualname": "OpenJSONColumnDef", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.OpenJSON": {"fullname": "sqlglot.expressions.OpenJSON", "modulename": "sqlglot.expressions", "qualname": "OpenJSON", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.JSONBContains": {"fullname": "sqlglot.expressions.JSONBContains", "modulename": "sqlglot.expressions", "qualname": "JSONBContains", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.JSONExtract": {"fullname": "sqlglot.expressions.JSONExtract", "modulename": "sqlglot.expressions", "qualname": "JSONExtract", "kind": "class", "doc": "

\n", "bases": "Binary, Func"}, "sqlglot.expressions.JSONExtractScalar": {"fullname": "sqlglot.expressions.JSONExtractScalar", "modulename": "sqlglot.expressions", "qualname": "JSONExtractScalar", "kind": "class", "doc": "

\n", "bases": "JSONExtract"}, "sqlglot.expressions.JSONBExtract": {"fullname": "sqlglot.expressions.JSONBExtract", "modulename": "sqlglot.expressions", "qualname": "JSONBExtract", "kind": "class", "doc": "

\n", "bases": "JSONExtract"}, "sqlglot.expressions.JSONBExtractScalar": {"fullname": "sqlglot.expressions.JSONBExtractScalar", "modulename": "sqlglot.expressions", "qualname": "JSONBExtractScalar", "kind": "class", "doc": "

\n", "bases": "JSONExtract"}, "sqlglot.expressions.JSONFormat": {"fullname": "sqlglot.expressions.JSONFormat", "modulename": "sqlglot.expressions", "qualname": "JSONFormat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Least": {"fullname": "sqlglot.expressions.Least", "modulename": "sqlglot.expressions", "qualname": "Least", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Left": {"fullname": "sqlglot.expressions.Left", "modulename": "sqlglot.expressions", "qualname": "Left", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Right": {"fullname": "sqlglot.expressions.Right", "modulename": "sqlglot.expressions", "qualname": "Right", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Length": {"fullname": "sqlglot.expressions.Length", "modulename": "sqlglot.expressions", "qualname": "Length", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Levenshtein": {"fullname": "sqlglot.expressions.Levenshtein", "modulename": "sqlglot.expressions", "qualname": "Levenshtein", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Ln": {"fullname": "sqlglot.expressions.Ln", "modulename": "sqlglot.expressions", "qualname": "Ln", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Log": {"fullname": "sqlglot.expressions.Log", "modulename": "sqlglot.expressions", "qualname": "Log", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Log2": {"fullname": "sqlglot.expressions.Log2", "modulename": "sqlglot.expressions", "qualname": "Log2", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Log10": {"fullname": "sqlglot.expressions.Log10", "modulename": "sqlglot.expressions", "qualname": "Log10", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.LogicalOr": {"fullname": "sqlglot.expressions.LogicalOr", "modulename": "sqlglot.expressions", "qualname": "LogicalOr", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.LogicalAnd": {"fullname": "sqlglot.expressions.LogicalAnd", "modulename": "sqlglot.expressions", "qualname": "LogicalAnd", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Lower": {"fullname": "sqlglot.expressions.Lower", "modulename": "sqlglot.expressions", "qualname": "Lower", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Map": {"fullname": "sqlglot.expressions.Map", "modulename": "sqlglot.expressions", "qualname": "Map", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StarMap": {"fullname": "sqlglot.expressions.StarMap", "modulename": "sqlglot.expressions", "qualname": "StarMap", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.VarMap": {"fullname": "sqlglot.expressions.VarMap", "modulename": "sqlglot.expressions", "qualname": "VarMap", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.MatchAgainst": {"fullname": "sqlglot.expressions.MatchAgainst", "modulename": "sqlglot.expressions", "qualname": "MatchAgainst", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Max": {"fullname": "sqlglot.expressions.Max", "modulename": "sqlglot.expressions", "qualname": "Max", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.MD5": {"fullname": "sqlglot.expressions.MD5", "modulename": "sqlglot.expressions", "qualname": "MD5", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Min": {"fullname": "sqlglot.expressions.Min", "modulename": "sqlglot.expressions", "qualname": "Min", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Month": {"fullname": "sqlglot.expressions.Month", "modulename": "sqlglot.expressions", "qualname": "Month", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Nvl2": {"fullname": "sqlglot.expressions.Nvl2", "modulename": "sqlglot.expressions", "qualname": "Nvl2", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Posexplode": {"fullname": "sqlglot.expressions.Posexplode", "modulename": "sqlglot.expressions", "qualname": "Posexplode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Pow": {"fullname": "sqlglot.expressions.Pow", "modulename": "sqlglot.expressions", "qualname": "Pow", "kind": "class", "doc": "

\n", "bases": "Binary, Func"}, "sqlglot.expressions.PercentileCont": {"fullname": "sqlglot.expressions.PercentileCont", "modulename": "sqlglot.expressions", "qualname": "PercentileCont", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.PercentileDisc": {"fullname": "sqlglot.expressions.PercentileDisc", "modulename": "sqlglot.expressions", "qualname": "PercentileDisc", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Quantile": {"fullname": "sqlglot.expressions.Quantile", "modulename": "sqlglot.expressions", "qualname": "Quantile", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.ApproxQuantile": {"fullname": "sqlglot.expressions.ApproxQuantile", "modulename": "sqlglot.expressions", "qualname": "ApproxQuantile", "kind": "class", "doc": "

\n", "bases": "Quantile"}, "sqlglot.expressions.RangeN": {"fullname": "sqlglot.expressions.RangeN", "modulename": "sqlglot.expressions", "qualname": "RangeN", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ReadCSV": {"fullname": "sqlglot.expressions.ReadCSV", "modulename": "sqlglot.expressions", "qualname": "ReadCSV", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Reduce": {"fullname": "sqlglot.expressions.Reduce", "modulename": "sqlglot.expressions", "qualname": "Reduce", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpExtract": {"fullname": "sqlglot.expressions.RegexpExtract", "modulename": "sqlglot.expressions", "qualname": "RegexpExtract", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpLike": {"fullname": "sqlglot.expressions.RegexpLike", "modulename": "sqlglot.expressions", "qualname": "RegexpLike", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpILike": {"fullname": "sqlglot.expressions.RegexpILike", "modulename": "sqlglot.expressions", "qualname": "RegexpILike", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpSplit": {"fullname": "sqlglot.expressions.RegexpSplit", "modulename": "sqlglot.expressions", "qualname": "RegexpSplit", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Repeat": {"fullname": "sqlglot.expressions.Repeat", "modulename": "sqlglot.expressions", "qualname": "Repeat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Round": {"fullname": "sqlglot.expressions.Round", "modulename": "sqlglot.expressions", "qualname": "Round", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RowNumber": {"fullname": "sqlglot.expressions.RowNumber", "modulename": "sqlglot.expressions", "qualname": "RowNumber", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SafeDivide": {"fullname": "sqlglot.expressions.SafeDivide", "modulename": "sqlglot.expressions", "qualname": "SafeDivide", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SetAgg": {"fullname": "sqlglot.expressions.SetAgg", "modulename": "sqlglot.expressions", "qualname": "SetAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.SHA": {"fullname": "sqlglot.expressions.SHA", "modulename": "sqlglot.expressions", "qualname": "SHA", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SHA2": {"fullname": "sqlglot.expressions.SHA2", "modulename": "sqlglot.expressions", "qualname": "SHA2", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SortArray": {"fullname": "sqlglot.expressions.SortArray", "modulename": "sqlglot.expressions", "qualname": "SortArray", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Split": {"fullname": "sqlglot.expressions.Split", "modulename": "sqlglot.expressions", "qualname": "Split", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Substring": {"fullname": "sqlglot.expressions.Substring", "modulename": "sqlglot.expressions", "qualname": "Substring", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StandardHash": {"fullname": "sqlglot.expressions.StandardHash", "modulename": "sqlglot.expressions", "qualname": "StandardHash", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrPosition": {"fullname": "sqlglot.expressions.StrPosition", "modulename": "sqlglot.expressions", "qualname": "StrPosition", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrToDate": {"fullname": "sqlglot.expressions.StrToDate", "modulename": "sqlglot.expressions", "qualname": "StrToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrToTime": {"fullname": "sqlglot.expressions.StrToTime", "modulename": "sqlglot.expressions", "qualname": "StrToTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrToUnix": {"fullname": "sqlglot.expressions.StrToUnix", "modulename": "sqlglot.expressions", "qualname": "StrToUnix", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.NumberToStr": {"fullname": "sqlglot.expressions.NumberToStr", "modulename": "sqlglot.expressions", "qualname": "NumberToStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Struct": {"fullname": "sqlglot.expressions.Struct", "modulename": "sqlglot.expressions", "qualname": "Struct", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StructExtract": {"fullname": "sqlglot.expressions.StructExtract", "modulename": "sqlglot.expressions", "qualname": "StructExtract", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Sum": {"fullname": "sqlglot.expressions.Sum", "modulename": "sqlglot.expressions", "qualname": "Sum", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Sqrt": {"fullname": "sqlglot.expressions.Sqrt", "modulename": "sqlglot.expressions", "qualname": "Sqrt", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Stddev": {"fullname": "sqlglot.expressions.Stddev", "modulename": "sqlglot.expressions", "qualname": "Stddev", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.StddevPop": {"fullname": "sqlglot.expressions.StddevPop", "modulename": "sqlglot.expressions", "qualname": "StddevPop", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.StddevSamp": {"fullname": "sqlglot.expressions.StddevSamp", "modulename": "sqlglot.expressions", "qualname": "StddevSamp", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.TimeToStr": {"fullname": "sqlglot.expressions.TimeToStr", "modulename": "sqlglot.expressions", "qualname": "TimeToStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeToTimeStr": {"fullname": "sqlglot.expressions.TimeToTimeStr", "modulename": "sqlglot.expressions", "qualname": "TimeToTimeStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeToUnix": {"fullname": "sqlglot.expressions.TimeToUnix", "modulename": "sqlglot.expressions", "qualname": "TimeToUnix", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeStrToDate": {"fullname": "sqlglot.expressions.TimeStrToDate", "modulename": "sqlglot.expressions", "qualname": "TimeStrToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeStrToTime": {"fullname": "sqlglot.expressions.TimeStrToTime", "modulename": "sqlglot.expressions", "qualname": "TimeStrToTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeStrToUnix": {"fullname": "sqlglot.expressions.TimeStrToUnix", "modulename": "sqlglot.expressions", "qualname": "TimeStrToUnix", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Trim": {"fullname": "sqlglot.expressions.Trim", "modulename": "sqlglot.expressions", "qualname": "Trim", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TsOrDsAdd": {"fullname": "sqlglot.expressions.TsOrDsAdd", "modulename": "sqlglot.expressions", "qualname": "TsOrDsAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TsOrDsToDateStr": {"fullname": "sqlglot.expressions.TsOrDsToDateStr", "modulename": "sqlglot.expressions", "qualname": "TsOrDsToDateStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TsOrDsToDate": {"fullname": "sqlglot.expressions.TsOrDsToDate", "modulename": "sqlglot.expressions", "qualname": "TsOrDsToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TsOrDiToDi": {"fullname": "sqlglot.expressions.TsOrDiToDi", "modulename": "sqlglot.expressions", "qualname": "TsOrDiToDi", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Unhex": {"fullname": "sqlglot.expressions.Unhex", "modulename": "sqlglot.expressions", "qualname": "Unhex", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.UnixToStr": {"fullname": "sqlglot.expressions.UnixToStr", "modulename": "sqlglot.expressions", "qualname": "UnixToStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.UnixToTime": {"fullname": "sqlglot.expressions.UnixToTime", "modulename": "sqlglot.expressions", "qualname": "UnixToTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.UnixToTimeStr": {"fullname": "sqlglot.expressions.UnixToTimeStr", "modulename": "sqlglot.expressions", "qualname": "UnixToTimeStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Upper": {"fullname": "sqlglot.expressions.Upper", "modulename": "sqlglot.expressions", "qualname": "Upper", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Variance": {"fullname": "sqlglot.expressions.Variance", "modulename": "sqlglot.expressions", "qualname": "Variance", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.VariancePop": {"fullname": "sqlglot.expressions.VariancePop", "modulename": "sqlglot.expressions", "qualname": "VariancePop", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Week": {"fullname": "sqlglot.expressions.Week", "modulename": "sqlglot.expressions", "qualname": "Week", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.XMLTable": {"fullname": "sqlglot.expressions.XMLTable", "modulename": "sqlglot.expressions", "qualname": "XMLTable", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Year": {"fullname": "sqlglot.expressions.Year", "modulename": "sqlglot.expressions", "qualname": "Year", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Use": {"fullname": "sqlglot.expressions.Use", "modulename": "sqlglot.expressions", "qualname": "Use", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Merge": {"fullname": "sqlglot.expressions.Merge", "modulename": "sqlglot.expressions", "qualname": "Merge", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.When": {"fullname": "sqlglot.expressions.When", "modulename": "sqlglot.expressions", "qualname": "When", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.NextValueFor": {"fullname": "sqlglot.expressions.NextValueFor", "modulename": "sqlglot.expressions", "qualname": "NextValueFor", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.maybe_parse": {"fullname": "sqlglot.expressions.maybe_parse", "modulename": "sqlglot.expressions", "qualname": "maybe_parse", "kind": "function", "doc": "

Gracefully handle a possible string or expression.

\n\n
Example:
\n\n
\n
\n
>>> maybe_parse("1")\n(LITERAL this: 1, is_string: False)\n>>> maybe_parse(to_identifier("x"))\n(IDENTIFIER this: x, quoted: False)\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • sql_or_expression: the SQL code string or an expression
  • \n
  • into: the SQLGlot Expression to parse into
  • \n
  • dialect: the dialect used to parse the input expressions (in the case that an\ninput expression is a SQL string).
  • \n
  • prefix: a string to prefix the sql with before it gets parsed\n(automatically includes a space)
  • \n
  • copy: whether or not to copy the expression.
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat an input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

Expression: the parsed or given expression.

\n
\n", "signature": "(\tsql_or_expression: Union[str, sqlglot.expressions.Expression],\t*,\tinto: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]], NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tprefix: Optional[str] = None,\tcopy: bool = False,\t**opts) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.union": {"fullname": "sqlglot.expressions.union", "modulename": "sqlglot.expressions", "qualname": "union", "kind": "function", "doc": "

Initializes a syntax tree from one UNION expression.

\n\n
Example:
\n\n
\n
\n
>>> union("SELECT * FROM foo", "SELECT * FROM bla").sql()\n'SELECT * FROM foo UNION SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • left: the SQL code string corresponding to the left-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • right: the SQL code string corresponding to the right-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Union instance.

\n
\n", "signature": "(\tleft: Union[str, sqlglot.expressions.Expression],\tright: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Union:", "funcdef": "def"}, "sqlglot.expressions.intersect": {"fullname": "sqlglot.expressions.intersect", "modulename": "sqlglot.expressions", "qualname": "intersect", "kind": "function", "doc": "

Initializes a syntax tree from one INTERSECT expression.

\n\n
Example:
\n\n
\n
\n
>>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql()\n'SELECT * FROM foo INTERSECT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • left: the SQL code string corresponding to the left-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • right: the SQL code string corresponding to the right-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Intersect instance.

\n
\n", "signature": "(\tleft: Union[str, sqlglot.expressions.Expression],\tright: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Intersect:", "funcdef": "def"}, "sqlglot.expressions.except_": {"fullname": "sqlglot.expressions.except_", "modulename": "sqlglot.expressions", "qualname": "except_", "kind": "function", "doc": "

Initializes a syntax tree from one EXCEPT expression.

\n\n
Example:
\n\n
\n
\n
>>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql()\n'SELECT * FROM foo EXCEPT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • left: the SQL code string corresponding to the left-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • right: the SQL code string corresponding to the right-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Except instance.

\n
\n", "signature": "(\tleft: Union[str, sqlglot.expressions.Expression],\tright: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Except:", "funcdef": "def"}, "sqlglot.expressions.select": {"fullname": "sqlglot.expressions.select", "modulename": "sqlglot.expressions", "qualname": "select", "kind": "function", "doc": "

Initializes a syntax tree from one or multiple SELECT expressions.

\n\n
Example:
\n\n
\n
\n
>>> select("col1", "col2").from_("tbl").sql()\n'SELECT col1, col2 FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code string to parse as the expressions of a\nSELECT statement. If an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expressions (in the case that an\ninput expression is a SQL string).
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat an input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

Select: the syntax tree for the SELECT statement.

\n
\n", "signature": "(\t*expressions: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.from_": {"fullname": "sqlglot.expressions.from_", "modulename": "sqlglot.expressions", "qualname": "from_", "kind": "function", "doc": "

Initializes a syntax tree from a FROM expression.

\n\n
Example:
\n\n
\n
\n
>>> from_("tbl").select("col1", "col2").sql()\n'SELECT col1, col2 FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expression: the SQL code string to parse as the FROM expressions of a\nSELECT statement. If an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expression (in the case that the\ninput expression is a SQL string).
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat the input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

Select: the syntax tree for the SELECT statement.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.update": {"fullname": "sqlglot.expressions.update", "modulename": "sqlglot.expressions", "qualname": "update", "kind": "function", "doc": "

Creates an update statement.

\n\n
Example:
\n\n
\n
\n
>>> update("my_table", {"x": 1, "y": "2", "z": None}, from_="baz", where="id > 1").sql()\n"UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *properties: a dictionary of properties to set, which are\nauto-converted to SQL objects, e.g. None -> NULL
  • \n
  • where: sql conditional parsed into a WHERE statement
  • \n
  • from_: sql statement parsed into a FROM statement
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Update: the syntax tree for the UPDATE statement.

\n
\n", "signature": "(\ttable: str | sqlglot.expressions.Table,\tproperties: dict,\twhere: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tfrom_: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Update:", "funcdef": "def"}, "sqlglot.expressions.delete": {"fullname": "sqlglot.expressions.delete", "modulename": "sqlglot.expressions", "qualname": "delete", "kind": "function", "doc": "

Builds a delete statement.

\n\n
Example:
\n\n
\n
\n
>>> delete("my_table", where="id > 1").sql()\n'DELETE FROM my_table WHERE id > 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • where: sql conditional parsed into a WHERE statement
  • \n
  • returning: sql conditional parsed into a RETURNING statement
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Delete: the syntax tree for the DELETE statement.

\n
\n", "signature": "(\ttable: Union[str, sqlglot.expressions.Expression],\twhere: Union[str, sqlglot.expressions.Expression, NoneType] = None,\treturning: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Delete:", "funcdef": "def"}, "sqlglot.expressions.insert": {"fullname": "sqlglot.expressions.insert", "modulename": "sqlglot.expressions", "qualname": "insert", "kind": "function", "doc": "

Builds an INSERT statement.

\n\n
Example:
\n\n
\n
\n
>>> insert("VALUES (1, 2, 3)", "tbl").sql()\n'INSERT INTO tbl VALUES (1, 2, 3)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the sql string or expression of the INSERT statement
  • \n
  • into: the table to insert data into.
  • \n
  • columns: optionally the table's column names.
  • \n
  • overwrite: whether to INSERT OVERWRITE or not.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: whether or not to copy the expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Insert: the syntax tree for the INSERT statement.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tinto: Union[str, sqlglot.expressions.Expression],\tcolumns: Optional[Sequence[Union[str, sqlglot.expressions.Expression]]] = None,\toverwrite: Optional[bool] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Insert:", "funcdef": "def"}, "sqlglot.expressions.condition": {"fullname": "sqlglot.expressions.condition", "modulename": "sqlglot.expressions", "qualname": "condition", "kind": "function", "doc": "

Initialize a logical condition expression.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").sql()\n'x = 1'\n
\n
\n \n

This is helpful for composing larger logical syntax trees:

\n \n
\n
>>> where = condition("x=1")\n>>> where = where.and_("y=1")\n>>> Select().from_("tbl").select("*").where(where).sql()\n'SELECT * FROM tbl WHERE x = 1 AND y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expression: the SQL code string to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expression (in the case that the\ninput expression is a SQL string).
  • \n
  • copy: Whether or not to copy expression (only applies to expressions).
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat the input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

The new Condition instance

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.and_": {"fullname": "sqlglot.expressions.and_", "modulename": "sqlglot.expressions", "qualname": "and_", "kind": "function", "doc": "

Combine multiple conditions with an AND logical operator.

\n\n
Example:
\n\n
\n
\n
>>> and_("x=1", and_("y=1", "z=1")).sql()\n'x = 1 AND (y = 1 AND z = 1)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: whether or not to copy expressions (only applies to Expressions).
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

And: the new condition

\n
\n", "signature": "(\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.or_": {"fullname": "sqlglot.expressions.or_", "modulename": "sqlglot.expressions", "qualname": "or_", "kind": "function", "doc": "

Combine multiple conditions with an OR logical operator.

\n\n
Example:
\n\n
\n
\n
>>> or_("x=1", or_("y=1", "z=1")).sql()\n'x = 1 OR (y = 1 OR z = 1)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: whether or not to copy expressions (only applies to Expressions).
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Or: the new condition

\n
\n", "signature": "(\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.not_": {"fullname": "sqlglot.expressions.not_", "modulename": "sqlglot.expressions", "qualname": "not_", "kind": "function", "doc": "

Wrap a condition with a NOT operator.

\n\n
Example:
\n\n
\n
\n
>>> not_("this_suit='black'").sql()\n"NOT this_suit = 'black'"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: whether to copy the expression or not.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new condition.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Not:", "funcdef": "def"}, "sqlglot.expressions.paren": {"fullname": "sqlglot.expressions.paren", "modulename": "sqlglot.expressions", "qualname": "paren", "kind": "function", "doc": "

Wrap an expression in parentheses.

\n\n
Example:
\n\n
\n
\n
>>> paren("5 + 3").sql()\n'(5 + 3)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • copy: whether to copy the expression or not.
  • \n
\n\n
Returns:
\n\n
\n

The wrapped expression.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tcopy: bool = True) -> sqlglot.expressions.Paren:", "funcdef": "def"}, "sqlglot.expressions.to_identifier": {"fullname": "sqlglot.expressions.to_identifier", "modulename": "sqlglot.expressions", "qualname": "to_identifier", "kind": "function", "doc": "

Builds an identifier.

\n\n
Arguments:
\n\n
    \n
  • name: The name to turn into an identifier.
  • \n
  • quoted: Whether or not to force-quote the identifier.
  • \n
  • copy: Whether or not to copy a passed-in Identifier node.
  • \n
\n\n
Returns:
\n\n
\n

The identifier ast node.

\n
\n", "signature": "(name, quoted=None, copy=True):", "funcdef": "def"}, "sqlglot.expressions.to_interval": {"fullname": "sqlglot.expressions.to_interval", "modulename": "sqlglot.expressions", "qualname": "to_interval", "kind": "function", "doc": "

Builds an interval expression from a string like '1 day' or '5 months'.

\n", "signature": "(\tinterval: str | sqlglot.expressions.Literal) -> sqlglot.expressions.Interval:", "funcdef": "def"}, "sqlglot.expressions.to_table": {"fullname": "sqlglot.expressions.to_table", "modulename": "sqlglot.expressions", "qualname": "to_table", "kind": "function", "doc": "

Create a table expression from a [catalog].[schema].[table] sql path. Catalog and schema are optional.\nIf a table is passed in then that table is returned.

\n\n
Arguments:
\n\n
    \n
  • sql_path: a [catalog].[schema].[table] string.
  • \n
  • dialect: the source dialect according to which the table name will be parsed.
  • \n
  • kwargs: the kwargs to instantiate the resulting Table expression with.
  • \n
\n\n
Returns:
\n\n
\n

A table expression.

\n
\n", "signature": "(\tsql_path: Union[str, sqlglot.expressions.Table, NoneType],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**kwargs) -> Optional[sqlglot.expressions.Table]:", "funcdef": "def"}, "sqlglot.expressions.to_column": {"fullname": "sqlglot.expressions.to_column", "modulename": "sqlglot.expressions", "qualname": "to_column", "kind": "function", "doc": "

Create a column from a [table].[column] sql path. Schema is optional.

\n\n

If a column is passed in then that column is returned.

\n\n
Arguments:
\n\n
    \n
  • sql_path: [table].[column] string
  • \n
\n\n
Returns:
\n\n
\n

Column: A column expression

\n
\n", "signature": "(\tsql_path: str | sqlglot.expressions.Column,\t**kwargs) -> sqlglot.expressions.Column:", "funcdef": "def"}, "sqlglot.expressions.alias_": {"fullname": "sqlglot.expressions.alias_", "modulename": "sqlglot.expressions", "qualname": "alias_", "kind": "function", "doc": "

Create an Alias expression.

\n\n
Example:
\n\n
\n
\n
>>> alias_('foo', 'bar').sql()\n'foo AS bar'\n
\n
\n \n
\n
>>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql()\n'(SELECT 1, 2) AS bar(a, b)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • alias: the alias name to use. If the name has\nspecial characters it is quoted.
  • \n
  • table: Whether or not to create a table alias, can also be a list of columns.
  • \n
  • quoted: whether or not to quote the alias
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: Whether or not to copy the expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Alias: the aliased expression

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\talias: str | sqlglot.expressions.Identifier,\ttable: Union[bool, Sequence[str | sqlglot.expressions.Identifier]] = False,\tquoted: Optional[bool] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts):", "funcdef": "def"}, "sqlglot.expressions.subquery": {"fullname": "sqlglot.expressions.subquery", "modulename": "sqlglot.expressions", "qualname": "subquery", "kind": "function", "doc": "

Build a subquery expression.

\n\n
Example:
\n\n
\n
\n
>>> subquery('select x from tbl', 'bar').select('x').sql()\n'SELECT x FROM (SELECT x FROM tbl) AS bar'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • alias: the alias name to use.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

A new Select instance with the subquery expression included.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\talias: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.column": {"fullname": "sqlglot.expressions.column", "modulename": "sqlglot.expressions", "qualname": "column", "kind": "function", "doc": "

Build a Column.

\n\n
Arguments:
\n\n
    \n
  • col: Column name.
  • \n
  • table: Table name.
  • \n
  • db: Database name.
  • \n
  • catalog: Catalog name.
  • \n
  • quoted: Whether to force quotes on the column's identifiers.
  • \n
\n\n
Returns:
\n\n
\n

The new Column instance.

\n
\n", "signature": "(\tcol: str | sqlglot.expressions.Identifier,\ttable: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tdb: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tcatalog: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tquoted: Optional[bool] = None) -> sqlglot.expressions.Column:", "funcdef": "def"}, "sqlglot.expressions.cast": {"fullname": "sqlglot.expressions.cast", "modulename": "sqlglot.expressions", "qualname": "cast", "kind": "function", "doc": "

Cast an expression to a data type.

\n\n
Example:
\n\n
\n
\n
>>> cast('x + 1', 'int').sql()\n'CAST(x + 1 AS INT)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: The expression to cast.
  • \n
  • to: The datatype to cast to.
  • \n
\n\n
Returns:
\n\n
\n

The new Cast instance.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tto: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type,\t**opts) -> sqlglot.expressions.Cast:", "funcdef": "def"}, "sqlglot.expressions.table_": {"fullname": "sqlglot.expressions.table_", "modulename": "sqlglot.expressions", "qualname": "table_", "kind": "function", "doc": "

Build a Table.

\n\n
Arguments:
\n\n
    \n
  • table: Table name.
  • \n
  • db: Database name.
  • \n
  • catalog: Catalog name.
  • \n
  • quote: Whether to force quotes on the table's identifiers.
  • \n
  • alias: Table's alias.
  • \n
\n\n
Returns:
\n\n
\n

The new Table instance.

\n
\n", "signature": "(\ttable: sqlglot.expressions.Identifier | str,\tdb: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tcatalog: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tquoted: Optional[bool] = None,\talias: Union[sqlglot.expressions.Identifier, str, NoneType] = None) -> sqlglot.expressions.Table:", "funcdef": "def"}, "sqlglot.expressions.values": {"fullname": "sqlglot.expressions.values", "modulename": "sqlglot.expressions", "qualname": "values", "kind": "function", "doc": "

Build VALUES statement.

\n\n
Example:
\n\n
\n
\n
>>> values([(1, '2')]).sql()\n"VALUES (1, '2')"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • values: values statements that will be converted to SQL
  • \n
  • alias: optional alias
  • \n
  • columns: Optional list of ordered column names or ordered dictionary of column names to types.\nIf either is provided, then an alias is also required.
  • \n
\n\n
Returns:
\n\n
\n

Values: the Values expression object

\n
\n", "signature": "(\tvalues: Iterable[Tuple[Any, ...]],\talias: Optional[str] = None,\tcolumns: Union[Iterable[str], Dict[str, sqlglot.expressions.DataType], NoneType] = None) -> sqlglot.expressions.Values:", "funcdef": "def"}, "sqlglot.expressions.var": {"fullname": "sqlglot.expressions.var", "modulename": "sqlglot.expressions", "qualname": "var", "kind": "function", "doc": "

Build a SQL variable.

\n\n
Example:
\n\n
\n
\n
>>> repr(var('x'))\n'(VAR this: x)'\n
\n
\n \n
\n
>>> repr(var(column('x', table='y')))\n'(VAR this: x)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • name: The name of the var or an expression whose name will become the var.
  • \n
\n\n
Returns:
\n\n
\n

The new variable node.

\n
\n", "signature": "(\tname: Union[str, sqlglot.expressions.Expression, NoneType]) -> sqlglot.expressions.Var:", "funcdef": "def"}, "sqlglot.expressions.rename_table": {"fullname": "sqlglot.expressions.rename_table", "modulename": "sqlglot.expressions", "qualname": "rename_table", "kind": "function", "doc": "

Build ALTER TABLE... RENAME... expression

\n\n
Arguments:
\n\n
    \n
  • old_name: The old name of the table
  • \n
  • new_name: The new name of the table
  • \n
\n\n
Returns:
\n\n
\n

Alter table expression

\n
\n", "signature": "(\told_name: str | sqlglot.expressions.Table,\tnew_name: str | sqlglot.expressions.Table) -> sqlglot.expressions.AlterTable:", "funcdef": "def"}, "sqlglot.expressions.convert": {"fullname": "sqlglot.expressions.convert", "modulename": "sqlglot.expressions", "qualname": "convert", "kind": "function", "doc": "

Convert a python value into an expression object.

\n\n

Raises an error if a conversion is not possible.

\n\n
Arguments:
\n\n
    \n
  • value: A python object.
  • \n
  • copy: Whether or not to copy value (only applies to Expressions and collections).
  • \n
\n\n
Returns:
\n\n
\n

Expression: the equivalent expression object.

\n
\n", "signature": "(value: Any, copy: bool = False) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.replace_children": {"fullname": "sqlglot.expressions.replace_children", "modulename": "sqlglot.expressions", "qualname": "replace_children", "kind": "function", "doc": "

Replace children of an expression with the result of a lambda fun(child) -> exp.

\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tfun: Callable,\t*args,\t**kwargs) -> None:", "funcdef": "def"}, "sqlglot.expressions.column_table_names": {"fullname": "sqlglot.expressions.column_table_names", "modulename": "sqlglot.expressions", "qualname": "column_table_names", "kind": "function", "doc": "

Return all table names referenced through columns in an expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e"))\n['c', 'a']\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: expression to find table names.
  • \n
\n\n
Returns:
\n\n
\n

A list of unique names.

\n
\n", "signature": "(expression: sqlglot.expressions.Expression) -> List[str]:", "funcdef": "def"}, "sqlglot.expressions.table_name": {"fullname": "sqlglot.expressions.table_name", "modulename": "sqlglot.expressions", "qualname": "table_name", "kind": "function", "doc": "

Get the full name of a table as a string.

\n\n
Arguments:
\n\n
    \n
  • table: table expression node or string.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import exp, parse_one\n>>> table_name(parse_one("select * from a.b.c").find(exp.Table))\n'a.b.c'\n
\n
\n
\n\n
Returns:
\n\n
\n

The table name.

\n
\n", "signature": "(table: sqlglot.expressions.Table | str) -> str:", "funcdef": "def"}, "sqlglot.expressions.replace_tables": {"fullname": "sqlglot.expressions.replace_tables", "modulename": "sqlglot.expressions", "qualname": "replace_tables", "kind": "function", "doc": "

Replace all tables in expression according to the mapping.

\n\n
Arguments:
\n\n
    \n
  • expression: expression node to be transformed and replaced.
  • \n
  • mapping: mapping of table names.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import exp, parse_one\n>>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql()\n'SELECT * FROM c'\n
\n
\n
\n\n
Returns:
\n\n
\n

The mapped expression.

\n
\n", "signature": "(expression: ~E, mapping: Dict[str, str]) -> ~E:", "funcdef": "def"}, "sqlglot.expressions.replace_placeholders": {"fullname": "sqlglot.expressions.replace_placeholders", "modulename": "sqlglot.expressions", "qualname": "replace_placeholders", "kind": "function", "doc": "

Replace placeholders in an expression.

\n\n
Arguments:
\n\n
    \n
  • expression: expression node to be transformed and replaced.
  • \n
  • args: positional names that will substitute unnamed placeholders in the given order.
  • \n
  • kwargs: keyword arguments that will substitute named placeholders.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import exp, parse_one\n>>> replace_placeholders(\n...     parse_one("select * from :tbl where ? = ?"),\n...     exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo")\n... ).sql()\n"SELECT * FROM foo WHERE str_col = 'b'"\n
\n
\n
\n\n
Returns:
\n\n
\n

The mapped expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\t*args,\t**kwargs) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.expand": {"fullname": "sqlglot.expressions.expand", "modulename": "sqlglot.expressions", "qualname": "expand", "kind": "function", "doc": "

Transforms an expression by expanding all referenced sources into subqueries.

\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()\n'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'\n
\n
\n \n
\n
>>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql()\n'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: The expression to expand.
  • \n
  • sources: A dictionary of name to Subqueryables.
  • \n
  • copy: Whether or not to copy the expression during transformation. Defaults to True.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tsources: Dict[str, sqlglot.expressions.Subqueryable],\tcopy: bool = True) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.func": {"fullname": "sqlglot.expressions.func", "modulename": "sqlglot.expressions", "qualname": "func", "kind": "function", "doc": "

Returns a Func expression.

\n\n
Examples:
\n\n
\n
\n
>>> func("abs", 5).sql()\n'ABS(5)'\n
\n
\n \n
\n
>>> func("cast", this=5, to=DataType.build("DOUBLE")).sql()\n'CAST(5 AS DOUBLE)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • name: the name of the function to build.
  • \n
  • args: the args used to instantiate the function of interest.
  • \n
  • dialect: the source dialect.
  • \n
  • kwargs: the kwargs used to instantiate the function of interest.
  • \n
\n\n
Note:
\n\n
\n

The arguments args and kwargs are mutually exclusive.

\n
\n\n
Returns:
\n\n
\n

An instance of the function of interest, or an anonymous function, if name doesn't\n correspond to an existing sqlglot.expressions.Func class.

\n
\n", "signature": "(\tname: str,\t*args,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**kwargs) -> sqlglot.expressions.Func:", "funcdef": "def"}, "sqlglot.expressions.true": {"fullname": "sqlglot.expressions.true", "modulename": "sqlglot.expressions", "qualname": "true", "kind": "function", "doc": "

Returns a true Boolean expression.

\n", "signature": "() -> sqlglot.expressions.Boolean:", "funcdef": "def"}, "sqlglot.expressions.false": {"fullname": "sqlglot.expressions.false", "modulename": "sqlglot.expressions", "qualname": "false", "kind": "function", "doc": "

Returns a false Boolean expression.

\n", "signature": "() -> sqlglot.expressions.Boolean:", "funcdef": "def"}, "sqlglot.expressions.null": {"fullname": "sqlglot.expressions.null", "modulename": "sqlglot.expressions", "qualname": "null", "kind": "function", "doc": "

Returns a Null expression.

\n", "signature": "() -> sqlglot.expressions.Null:", "funcdef": "def"}, "sqlglot.generator": {"fullname": "sqlglot.generator", "modulename": "sqlglot.generator", "kind": "module", "doc": "

\n"}, "sqlglot.generator.Generator": {"fullname": "sqlglot.generator.Generator", "modulename": "sqlglot.generator", "qualname": "Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a Python time format and the value represents the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • \n
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • \n
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • \n
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • \n
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • \n
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • \n
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • \n
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • \n
  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an uppercase character, True defaults to always.
  • \n
  • normalize (bool): if set to True, all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true, unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n"}, "sqlglot.generator.Generator.__init__": {"fullname": "sqlglot.generator.Generator.__init__", "modulename": "sqlglot.generator", "qualname": "Generator.__init__", "kind": "function", "doc": "

\n", "signature": "(\ttime_mapping=None,\ttime_trie=None,\tpretty=None,\tquote_start=None,\tquote_end=None,\tidentifier_start=None,\tidentifier_end=None,\tbit_start=None,\tbit_end=None,\thex_start=None,\thex_end=None,\tbyte_start=None,\tbyte_end=None,\traw_start=None,\traw_end=None,\tidentify=False,\tnormalize=False,\tstring_escape=None,\tidentifier_escape=None,\tpad=2,\tindent=2,\tindex_offset=0,\tunnest_column_only=False,\talias_post_tablesample=False,\tidentifiers_can_start_with_digit=False,\tnormalize_functions='upper',\tunsupported_level=<ErrorLevel.WARN: 'WARN'>,\tnull_ordering=None,\tmax_unsupported=3,\tleading_comma=False,\tmax_text_width=80,\tcomments=True)"}, "sqlglot.generator.Generator.generate": {"fullname": "sqlglot.generator.Generator.generate", "modulename": "sqlglot.generator", "qualname": "Generator.generate", "kind": "function", "doc": "

Generates a SQL string by interpreting the given syntax tree.

\n\n

Args\n expression: the syntax tree.\n cache: an optional SQL string cache. This leverages the hash of an expression, which is slow, so only use it if you set _hash on each node.

\n\n

Returns\n the SQL string.

\n", "signature": "(\tself,\texpression: Optional[sqlglot.expressions.Expression],\tcache: Optional[Dict[int, str]] = None) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.unsupported": {"fullname": "sqlglot.generator.Generator.unsupported", "modulename": "sqlglot.generator", "qualname": "Generator.unsupported", "kind": "function", "doc": "

\n", "signature": "(self, message: str) -> None:", "funcdef": "def"}, "sqlglot.generator.Generator.sep": {"fullname": "sqlglot.generator.Generator.sep", "modulename": "sqlglot.generator", "qualname": "Generator.sep", "kind": "function", "doc": "

\n", "signature": "(self, sep: str = ' ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.seg": {"fullname": "sqlglot.generator.Generator.seg", "modulename": "sqlglot.generator", "qualname": "Generator.seg", "kind": "function", "doc": "

\n", "signature": "(self, sql: str, sep: str = ' ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pad_comment": {"fullname": "sqlglot.generator.Generator.pad_comment", "modulename": "sqlglot.generator", "qualname": "Generator.pad_comment", "kind": "function", "doc": "

\n", "signature": "(self, comment: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.maybe_comment": {"fullname": "sqlglot.generator.Generator.maybe_comment", "modulename": "sqlglot.generator", "qualname": "Generator.maybe_comment", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsql: str,\texpression: Optional[sqlglot.expressions.Expression] = None,\tcomments: Optional[List[str]] = None) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.wrap": {"fullname": "sqlglot.generator.Generator.wrap", "modulename": "sqlglot.generator", "qualname": "Generator.wrap", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression | str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.no_identify": {"fullname": "sqlglot.generator.Generator.no_identify", "modulename": "sqlglot.generator", "qualname": "Generator.no_identify", "kind": "function", "doc": "

\n", "signature": "(self, func: Callable[..., str], *args, **kwargs) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.normalize_func": {"fullname": "sqlglot.generator.Generator.normalize_func", "modulename": "sqlglot.generator", "qualname": "Generator.normalize_func", "kind": "function", "doc": "

\n", "signature": "(self, name: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.indent": {"fullname": "sqlglot.generator.Generator.indent", "modulename": "sqlglot.generator", "qualname": "Generator.indent", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsql: str,\tlevel: int = 0,\tpad: Optional[int] = None,\tskip_first: bool = False,\tskip_last: bool = False) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sql": {"fullname": "sqlglot.generator.Generator.sql", "modulename": "sqlglot.generator", "qualname": "Generator.sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, NoneType],\tkey: Optional[str] = None,\tcomment: bool = True) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.uncache_sql": {"fullname": "sqlglot.generator.Generator.uncache_sql", "modulename": "sqlglot.generator", "qualname": "Generator.uncache_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Uncache) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cache_sql": {"fullname": "sqlglot.generator.Generator.cache_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cache_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cache) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.characterset_sql": {"fullname": "sqlglot.generator.Generator.characterset_sql", "modulename": "sqlglot.generator", "qualname": "Generator.characterset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CharacterSet) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.column_sql": {"fullname": "sqlglot.generator.Generator.column_sql", "modulename": "sqlglot.generator", "qualname": "Generator.column_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Column) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.columnposition_sql": {"fullname": "sqlglot.generator.Generator.columnposition_sql", "modulename": "sqlglot.generator", "qualname": "Generator.columnposition_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ColumnPosition) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.columndef_sql": {"fullname": "sqlglot.generator.Generator.columndef_sql", "modulename": "sqlglot.generator", "qualname": "Generator.columndef_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ColumnDef, sep: str = ' ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.columnconstraint_sql": {"fullname": "sqlglot.generator.Generator.columnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.columnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.autoincrementcolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, _) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.compresscolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.compresscolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CompressColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.generatedasidentitycolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.GeneratedAsIdentityColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.notnullcolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.notnullcolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NotNullColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.primarykeycolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.primarykeycolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.PrimaryKeyColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.uniquecolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.uniquecolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.UniqueColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.create_sql": {"fullname": "sqlglot.generator.Generator.create_sql", "modulename": "sqlglot.generator", "qualname": "Generator.create_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Create) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.clone_sql": {"fullname": "sqlglot.generator.Generator.clone_sql", "modulename": "sqlglot.generator", "qualname": "Generator.clone_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Clone) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.describe_sql": {"fullname": "sqlglot.generator.Generator.describe_sql", "modulename": "sqlglot.generator", "qualname": "Generator.describe_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Describe) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.prepend_ctes": {"fullname": "sqlglot.generator.Generator.prepend_ctes", "modulename": "sqlglot.generator", "qualname": "Generator.prepend_ctes", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression, sql: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.with_sql": {"fullname": "sqlglot.generator.Generator.with_sql", "modulename": "sqlglot.generator", "qualname": "Generator.with_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.With) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cte_sql": {"fullname": "sqlglot.generator.Generator.cte_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CTE) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tablealias_sql": {"fullname": "sqlglot.generator.Generator.tablealias_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tablealias_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.TableAlias) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitstring_sql": {"fullname": "sqlglot.generator.Generator.bitstring_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitstring_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitString) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.hexstring_sql": {"fullname": "sqlglot.generator.Generator.hexstring_sql", "modulename": "sqlglot.generator", "qualname": "Generator.hexstring_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.HexString) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bytestring_sql": {"fullname": "sqlglot.generator.Generator.bytestring_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bytestring_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ByteString) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.rawstring_sql": {"fullname": "sqlglot.generator.Generator.rawstring_sql", "modulename": "sqlglot.generator", "qualname": "Generator.rawstring_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RawString) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.datatypesize_sql": {"fullname": "sqlglot.generator.Generator.datatypesize_sql", "modulename": "sqlglot.generator", "qualname": "Generator.datatypesize_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataTypeSize) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.datatype_sql": {"fullname": "sqlglot.generator.Generator.datatype_sql", "modulename": "sqlglot.generator", "qualname": "Generator.datatype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataType) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.directory_sql": {"fullname": "sqlglot.generator.Generator.directory_sql", "modulename": "sqlglot.generator", "qualname": "Generator.directory_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Directory) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.delete_sql": {"fullname": "sqlglot.generator.Generator.delete_sql", "modulename": "sqlglot.generator", "qualname": "Generator.delete_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Delete) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.drop_sql": {"fullname": "sqlglot.generator.Generator.drop_sql", "modulename": "sqlglot.generator", "qualname": "Generator.drop_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Drop) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.except_sql": {"fullname": "sqlglot.generator.Generator.except_sql", "modulename": "sqlglot.generator", "qualname": "Generator.except_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.except_op": {"fullname": "sqlglot.generator.Generator.except_op", "modulename": "sqlglot.generator", "qualname": "Generator.except_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.fetch_sql": {"fullname": "sqlglot.generator.Generator.fetch_sql", "modulename": "sqlglot.generator", "qualname": "Generator.fetch_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Fetch) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.filter_sql": {"fullname": "sqlglot.generator.Generator.filter_sql", "modulename": "sqlglot.generator", "qualname": "Generator.filter_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Filter) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.hint_sql": {"fullname": "sqlglot.generator.Generator.hint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.hint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Hint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.index_sql": {"fullname": "sqlglot.generator.Generator.index_sql", "modulename": "sqlglot.generator", "qualname": "Generator.index_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Index) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.identifier_sql": {"fullname": "sqlglot.generator.Generator.identifier_sql", "modulename": "sqlglot.generator", "qualname": "Generator.identifier_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Identifier) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.inputoutputformat_sql": {"fullname": "sqlglot.generator.Generator.inputoutputformat_sql", "modulename": "sqlglot.generator", "qualname": "Generator.inputoutputformat_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.InputOutputFormat) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.national_sql": {"fullname": "sqlglot.generator.Generator.national_sql", "modulename": "sqlglot.generator", "qualname": "Generator.national_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.National, prefix: str = 'N') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.partition_sql": {"fullname": "sqlglot.generator.Generator.partition_sql", "modulename": "sqlglot.generator", "qualname": "Generator.partition_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Partition) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.properties_sql": {"fullname": "sqlglot.generator.Generator.properties_sql", "modulename": "sqlglot.generator", "qualname": "Generator.properties_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.root_properties": {"fullname": "sqlglot.generator.Generator.root_properties", "modulename": "sqlglot.generator", "qualname": "Generator.root_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.properties": {"fullname": "sqlglot.generator.Generator.properties", "modulename": "sqlglot.generator", "qualname": "Generator.properties", "kind": "function", "doc": "

\n", "signature": "(\tself,\tproperties: sqlglot.expressions.Properties,\tprefix: str = '',\tsep: str = ', ',\tsuffix: str = '',\twrapped: bool = True) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.with_properties": {"fullname": "sqlglot.generator.Generator.with_properties", "modulename": "sqlglot.generator", "qualname": "Generator.with_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.locate_properties": {"fullname": "sqlglot.generator.Generator.locate_properties", "modulename": "sqlglot.generator", "qualname": "Generator.locate_properties", "kind": "function", "doc": "

\n", "signature": "(\tself,\tproperties: sqlglot.expressions.Properties) -> Dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]:", "funcdef": "def"}, "sqlglot.generator.Generator.property_sql": {"fullname": "sqlglot.generator.Generator.property_sql", "modulename": "sqlglot.generator", "qualname": "Generator.property_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Property) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.likeproperty_sql": {"fullname": "sqlglot.generator.Generator.likeproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.likeproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LikeProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.fallbackproperty_sql": {"fullname": "sqlglot.generator.Generator.fallbackproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.fallbackproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.FallbackProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.journalproperty_sql": {"fullname": "sqlglot.generator.Generator.journalproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.journalproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.JournalProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.freespaceproperty_sql": {"fullname": "sqlglot.generator.Generator.freespaceproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.freespaceproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.FreespaceProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.checksumproperty_sql": {"fullname": "sqlglot.generator.Generator.checksumproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.checksumproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ChecksumProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"fullname": "sqlglot.generator.Generator.mergeblockratioproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mergeblockratioproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MergeBlockRatioProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"fullname": "sqlglot.generator.Generator.datablocksizeproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.datablocksizeproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataBlocksizeProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"fullname": "sqlglot.generator.Generator.blockcompressionproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.blockcompressionproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BlockCompressionProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"fullname": "sqlglot.generator.Generator.isolatedloadingproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.isolatedloadingproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.IsolatedLoadingProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lockingproperty_sql": {"fullname": "sqlglot.generator.Generator.lockingproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lockingproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LockingProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.withdataproperty_sql": {"fullname": "sqlglot.generator.Generator.withdataproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.withdataproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.WithDataProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.insert_sql": {"fullname": "sqlglot.generator.Generator.insert_sql", "modulename": "sqlglot.generator", "qualname": "Generator.insert_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Insert) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.intersect_sql": {"fullname": "sqlglot.generator.Generator.intersect_sql", "modulename": "sqlglot.generator", "qualname": "Generator.intersect_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.intersect_op": {"fullname": "sqlglot.generator.Generator.intersect_op", "modulename": "sqlglot.generator", "qualname": "Generator.intersect_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.introducer_sql": {"fullname": "sqlglot.generator.Generator.introducer_sql", "modulename": "sqlglot.generator", "qualname": "Generator.introducer_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Introducer) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pseudotype_sql": {"fullname": "sqlglot.generator.Generator.pseudotype_sql", "modulename": "sqlglot.generator", "qualname": "Generator.pseudotype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.PseudoType) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.onconflict_sql": {"fullname": "sqlglot.generator.Generator.onconflict_sql", "modulename": "sqlglot.generator", "qualname": "Generator.onconflict_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.OnConflict) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.returning_sql": {"fullname": "sqlglot.generator.Generator.returning_sql", "modulename": "sqlglot.generator", "qualname": "Generator.returning_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Returning) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"fullname": "sqlglot.generator.Generator.rowformatdelimitedproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.rowformatdelimitedproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RowFormatDelimitedProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.table_sql": {"fullname": "sqlglot.generator.Generator.table_sql", "modulename": "sqlglot.generator", "qualname": "Generator.table_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Table, sep: str = ' AS ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tablesample_sql": {"fullname": "sqlglot.generator.Generator.tablesample_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tablesample_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.TableSample,\tseed_prefix: str = 'SEED',\tsep=' AS ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pivot_sql": {"fullname": "sqlglot.generator.Generator.pivot_sql", "modulename": "sqlglot.generator", "qualname": "Generator.pivot_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Pivot) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tuple_sql": {"fullname": "sqlglot.generator.Generator.tuple_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tuple_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Tuple) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.update_sql": {"fullname": "sqlglot.generator.Generator.update_sql", "modulename": "sqlglot.generator", "qualname": "Generator.update_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Update) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.values_sql": {"fullname": "sqlglot.generator.Generator.values_sql", "modulename": "sqlglot.generator", "qualname": "Generator.values_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Values) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.var_sql": {"fullname": "sqlglot.generator.Generator.var_sql", "modulename": "sqlglot.generator", "qualname": "Generator.var_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Var) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.into_sql": {"fullname": "sqlglot.generator.Generator.into_sql", "modulename": "sqlglot.generator", "qualname": "Generator.into_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Into) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.from_sql": {"fullname": "sqlglot.generator.Generator.from_sql", "modulename": "sqlglot.generator", "qualname": "Generator.from_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.From) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.group_sql": {"fullname": "sqlglot.generator.Generator.group_sql", "modulename": "sqlglot.generator", "qualname": "Generator.group_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Group) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.having_sql": {"fullname": "sqlglot.generator.Generator.having_sql", "modulename": "sqlglot.generator", "qualname": "Generator.having_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Having) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.join_sql": {"fullname": "sqlglot.generator.Generator.join_sql", "modulename": "sqlglot.generator", "qualname": "Generator.join_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Join) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lambda_sql": {"fullname": "sqlglot.generator.Generator.lambda_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lambda_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Lambda,\tarrow_sep: str = '->') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lateral_sql": {"fullname": "sqlglot.generator.Generator.lateral_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lateral_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Lateral) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.limit_sql": {"fullname": "sqlglot.generator.Generator.limit_sql", "modulename": "sqlglot.generator", "qualname": "Generator.limit_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Limit) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.offset_sql": {"fullname": "sqlglot.generator.Generator.offset_sql", "modulename": "sqlglot.generator", "qualname": "Generator.offset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Offset) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.setitem_sql": {"fullname": "sqlglot.generator.Generator.setitem_sql", "modulename": "sqlglot.generator", "qualname": "Generator.setitem_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SetItem) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.set_sql": {"fullname": "sqlglot.generator.Generator.set_sql", "modulename": "sqlglot.generator", "qualname": "Generator.set_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Set) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pragma_sql": {"fullname": "sqlglot.generator.Generator.pragma_sql", "modulename": "sqlglot.generator", "qualname": "Generator.pragma_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Pragma) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lock_sql": {"fullname": "sqlglot.generator.Generator.lock_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lock_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Lock) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.literal_sql": {"fullname": "sqlglot.generator.Generator.literal_sql", "modulename": "sqlglot.generator", "qualname": "Generator.literal_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Literal) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.loaddata_sql": {"fullname": "sqlglot.generator.Generator.loaddata_sql", "modulename": "sqlglot.generator", "qualname": "Generator.loaddata_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LoadData) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.null_sql": {"fullname": "sqlglot.generator.Generator.null_sql", "modulename": "sqlglot.generator", "qualname": "Generator.null_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.boolean_sql": {"fullname": "sqlglot.generator.Generator.boolean_sql", "modulename": "sqlglot.generator", "qualname": "Generator.boolean_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Boolean) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.order_sql": {"fullname": "sqlglot.generator.Generator.order_sql", "modulename": "sqlglot.generator", "qualname": "Generator.order_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Order, flat: bool = False) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cluster_sql": {"fullname": "sqlglot.generator.Generator.cluster_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cluster_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cluster) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.distribute_sql": {"fullname": "sqlglot.generator.Generator.distribute_sql", "modulename": "sqlglot.generator", "qualname": "Generator.distribute_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Distribute) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sort_sql": {"fullname": "sqlglot.generator.Generator.sort_sql", "modulename": "sqlglot.generator", "qualname": "Generator.sort_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Sort) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ordered_sql": {"fullname": "sqlglot.generator.Generator.ordered_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ordered_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Ordered) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.matchrecognize_sql": {"fullname": "sqlglot.generator.Generator.matchrecognize_sql", "modulename": "sqlglot.generator", "qualname": "Generator.matchrecognize_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MatchRecognize) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.query_modifiers": {"fullname": "sqlglot.generator.Generator.query_modifiers", "modulename": "sqlglot.generator", "qualname": "Generator.query_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression, *sqls: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.after_having_modifiers": {"fullname": "sqlglot.generator.Generator.after_having_modifiers", "modulename": "sqlglot.generator", "qualname": "Generator.after_having_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> List[str]:", "funcdef": "def"}, "sqlglot.generator.Generator.after_limit_modifiers": {"fullname": "sqlglot.generator.Generator.after_limit_modifiers", "modulename": "sqlglot.generator", "qualname": "Generator.after_limit_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> List[str]:", "funcdef": "def"}, "sqlglot.generator.Generator.select_sql": {"fullname": "sqlglot.generator.Generator.select_sql", "modulename": "sqlglot.generator", "qualname": "Generator.select_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Select) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.schema_sql": {"fullname": "sqlglot.generator.Generator.schema_sql", "modulename": "sqlglot.generator", "qualname": "Generator.schema_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Schema) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.star_sql": {"fullname": "sqlglot.generator.Generator.star_sql", "modulename": "sqlglot.generator", "qualname": "Generator.star_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Star) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.parameter_sql": {"fullname": "sqlglot.generator.Generator.parameter_sql", "modulename": "sqlglot.generator", "qualname": "Generator.parameter_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Parameter) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sessionparameter_sql": {"fullname": "sqlglot.generator.Generator.sessionparameter_sql", "modulename": "sqlglot.generator", "qualname": "Generator.sessionparameter_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SessionParameter) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.placeholder_sql": {"fullname": "sqlglot.generator.Generator.placeholder_sql", "modulename": "sqlglot.generator", "qualname": "Generator.placeholder_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Placeholder) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.subquery_sql": {"fullname": "sqlglot.generator.Generator.subquery_sql", "modulename": "sqlglot.generator", "qualname": "Generator.subquery_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Subquery, sep: str = ' AS ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.qualify_sql": {"fullname": "sqlglot.generator.Generator.qualify_sql", "modulename": "sqlglot.generator", "qualname": "Generator.qualify_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Qualify) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.union_sql": {"fullname": "sqlglot.generator.Generator.union_sql", "modulename": "sqlglot.generator", "qualname": "Generator.union_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Union) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.union_op": {"fullname": "sqlglot.generator.Generator.union_op", "modulename": "sqlglot.generator", "qualname": "Generator.union_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Union) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.unnest_sql": {"fullname": "sqlglot.generator.Generator.unnest_sql", "modulename": "sqlglot.generator", "qualname": "Generator.unnest_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Unnest) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.where_sql": {"fullname": "sqlglot.generator.Generator.where_sql", "modulename": "sqlglot.generator", "qualname": "Generator.where_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Where) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.window_sql": {"fullname": "sqlglot.generator.Generator.window_sql", "modulename": "sqlglot.generator", "qualname": "Generator.window_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Window) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.partition_by_sql": {"fullname": "sqlglot.generator.Generator.partition_by_sql", "modulename": "sqlglot.generator", "qualname": "Generator.partition_by_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Window | sqlglot.expressions.MatchRecognize) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.windowspec_sql": {"fullname": "sqlglot.generator.Generator.windowspec_sql", "modulename": "sqlglot.generator", "qualname": "Generator.windowspec_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.WindowSpec) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.withingroup_sql": {"fullname": "sqlglot.generator.Generator.withingroup_sql", "modulename": "sqlglot.generator", "qualname": "Generator.withingroup_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.WithinGroup) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.between_sql": {"fullname": "sqlglot.generator.Generator.between_sql", "modulename": "sqlglot.generator", "qualname": "Generator.between_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Between) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bracket_sql": {"fullname": "sqlglot.generator.Generator.bracket_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bracket_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Bracket) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.all_sql": {"fullname": "sqlglot.generator.Generator.all_sql", "modulename": "sqlglot.generator", "qualname": "Generator.all_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.All) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.any_sql": {"fullname": "sqlglot.generator.Generator.any_sql", "modulename": "sqlglot.generator", "qualname": "Generator.any_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Any) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.exists_sql": {"fullname": "sqlglot.generator.Generator.exists_sql", "modulename": "sqlglot.generator", "qualname": "Generator.exists_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Exists) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.case_sql": {"fullname": "sqlglot.generator.Generator.case_sql", "modulename": "sqlglot.generator", "qualname": "Generator.case_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Case) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.constraint_sql": {"fullname": "sqlglot.generator.Generator.constraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.constraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Constraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.nextvaluefor_sql": {"fullname": "sqlglot.generator.Generator.nextvaluefor_sql", "modulename": "sqlglot.generator", "qualname": "Generator.nextvaluefor_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NextValueFor) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.extract_sql": {"fullname": "sqlglot.generator.Generator.extract_sql", "modulename": "sqlglot.generator", "qualname": "Generator.extract_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Extract) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.trim_sql": {"fullname": "sqlglot.generator.Generator.trim_sql", "modulename": "sqlglot.generator", "qualname": "Generator.trim_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Trim) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.concat_sql": {"fullname": "sqlglot.generator.Generator.concat_sql", "modulename": "sqlglot.generator", "qualname": "Generator.concat_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Concat) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.check_sql": {"fullname": "sqlglot.generator.Generator.check_sql", "modulename": "sqlglot.generator", "qualname": "Generator.check_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Check) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.foreignkey_sql": {"fullname": "sqlglot.generator.Generator.foreignkey_sql", "modulename": "sqlglot.generator", "qualname": "Generator.foreignkey_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ForeignKey) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.primarykey_sql": {"fullname": "sqlglot.generator.Generator.primarykey_sql", "modulename": "sqlglot.generator", "qualname": "Generator.primarykey_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ForeignKey) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.if_sql": {"fullname": "sqlglot.generator.Generator.if_sql", "modulename": "sqlglot.generator", "qualname": "Generator.if_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.If) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.matchagainst_sql": {"fullname": "sqlglot.generator.Generator.matchagainst_sql", "modulename": "sqlglot.generator", "qualname": "Generator.matchagainst_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MatchAgainst) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"fullname": "sqlglot.generator.Generator.jsonkeyvalue_sql", "modulename": "sqlglot.generator", "qualname": "Generator.jsonkeyvalue_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.JSONKeyValue) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.jsonobject_sql": {"fullname": "sqlglot.generator.Generator.jsonobject_sql", "modulename": "sqlglot.generator", "qualname": "Generator.jsonobject_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.JSONObject) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"fullname": "sqlglot.generator.Generator.openjsoncolumndef_sql", "modulename": "sqlglot.generator", "qualname": "Generator.openjsoncolumndef_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.OpenJSONColumnDef) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.openjson_sql": {"fullname": "sqlglot.generator.Generator.openjson_sql", "modulename": "sqlglot.generator", "qualname": "Generator.openjson_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.OpenJSON) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.in_sql": {"fullname": "sqlglot.generator.Generator.in_sql", "modulename": "sqlglot.generator", "qualname": "Generator.in_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.In) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.in_unnest_op": {"fullname": "sqlglot.generator.Generator.in_unnest_op", "modulename": "sqlglot.generator", "qualname": "Generator.in_unnest_op", "kind": "function", "doc": "

\n", "signature": "(self, unnest: sqlglot.expressions.Unnest) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.interval_sql": {"fullname": "sqlglot.generator.Generator.interval_sql", "modulename": "sqlglot.generator", "qualname": "Generator.interval_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Interval) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.return_sql": {"fullname": "sqlglot.generator.Generator.return_sql", "modulename": "sqlglot.generator", "qualname": "Generator.return_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Return) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.reference_sql": {"fullname": "sqlglot.generator.Generator.reference_sql", "modulename": "sqlglot.generator", "qualname": "Generator.reference_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Reference) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.anonymous_sql": {"fullname": "sqlglot.generator.Generator.anonymous_sql", "modulename": "sqlglot.generator", "qualname": "Generator.anonymous_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Anonymous) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.paren_sql": {"fullname": "sqlglot.generator.Generator.paren_sql", "modulename": "sqlglot.generator", "qualname": "Generator.paren_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Paren) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.neg_sql": {"fullname": "sqlglot.generator.Generator.neg_sql", "modulename": "sqlglot.generator", "qualname": "Generator.neg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Neg) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.not_sql": {"fullname": "sqlglot.generator.Generator.not_sql", "modulename": "sqlglot.generator", "qualname": "Generator.not_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Not) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.alias_sql": {"fullname": "sqlglot.generator.Generator.alias_sql", "modulename": "sqlglot.generator", "qualname": "Generator.alias_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Alias) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.aliases_sql": {"fullname": "sqlglot.generator.Generator.aliases_sql", "modulename": "sqlglot.generator", "qualname": "Generator.aliases_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Aliases) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.attimezone_sql": {"fullname": "sqlglot.generator.Generator.attimezone_sql", "modulename": "sqlglot.generator", "qualname": "Generator.attimezone_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AtTimeZone) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.add_sql": {"fullname": "sqlglot.generator.Generator.add_sql", "modulename": "sqlglot.generator", "qualname": "Generator.add_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Add) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.and_sql": {"fullname": "sqlglot.generator.Generator.and_sql", "modulename": "sqlglot.generator", "qualname": "Generator.and_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.And) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.connector_sql": {"fullname": "sqlglot.generator.Generator.connector_sql", "modulename": "sqlglot.generator", "qualname": "Generator.connector_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Connector, op: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiseand_sql": {"fullname": "sqlglot.generator.Generator.bitwiseand_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiseand_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseAnd) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"fullname": "sqlglot.generator.Generator.bitwiseleftshift_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiseleftshift_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseLeftShift) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwisenot_sql": {"fullname": "sqlglot.generator.Generator.bitwisenot_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwisenot_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseNot) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiseor_sql": {"fullname": "sqlglot.generator.Generator.bitwiseor_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiseor_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseOr) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"fullname": "sqlglot.generator.Generator.bitwiserightshift_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiserightshift_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseRightShift) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwisexor_sql": {"fullname": "sqlglot.generator.Generator.bitwisexor_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwisexor_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseXor) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cast_sql": {"fullname": "sqlglot.generator.Generator.cast_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cast) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.currentdate_sql": {"fullname": "sqlglot.generator.Generator.currentdate_sql", "modulename": "sqlglot.generator", "qualname": "Generator.currentdate_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CurrentDate) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.collate_sql": {"fullname": "sqlglot.generator.Generator.collate_sql", "modulename": "sqlglot.generator", "qualname": "Generator.collate_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Collate) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.command_sql": {"fullname": "sqlglot.generator.Generator.command_sql", "modulename": "sqlglot.generator", "qualname": "Generator.command_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Command) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.comment_sql": {"fullname": "sqlglot.generator.Generator.comment_sql", "modulename": "sqlglot.generator", "qualname": "Generator.comment_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Comment) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"fullname": "sqlglot.generator.Generator.mergetreettlaction_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mergetreettlaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MergeTreeTTLAction) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mergetreettl_sql": {"fullname": "sqlglot.generator.Generator.mergetreettl_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mergetreettl_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MergeTreeTTL) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.transaction_sql": {"fullname": "sqlglot.generator.Generator.transaction_sql", "modulename": "sqlglot.generator", "qualname": "Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Transaction) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.commit_sql": {"fullname": "sqlglot.generator.Generator.commit_sql", "modulename": "sqlglot.generator", "qualname": "Generator.commit_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Commit) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.rollback_sql": {"fullname": "sqlglot.generator.Generator.rollback_sql", "modulename": "sqlglot.generator", "qualname": "Generator.rollback_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Rollback) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.altercolumn_sql": {"fullname": "sqlglot.generator.Generator.altercolumn_sql", "modulename": "sqlglot.generator", "qualname": "Generator.altercolumn_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AlterColumn) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.renametable_sql": {"fullname": "sqlglot.generator.Generator.renametable_sql", "modulename": "sqlglot.generator", "qualname": "Generator.renametable_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RenameTable) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.altertable_sql": {"fullname": "sqlglot.generator.Generator.altertable_sql", "modulename": "sqlglot.generator", "qualname": "Generator.altertable_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AlterTable) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.droppartition_sql": {"fullname": "sqlglot.generator.Generator.droppartition_sql", "modulename": "sqlglot.generator", "qualname": "Generator.droppartition_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DropPartition) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.addconstraint_sql": {"fullname": "sqlglot.generator.Generator.addconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.addconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AddConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.distinct_sql": {"fullname": "sqlglot.generator.Generator.distinct_sql", "modulename": "sqlglot.generator", "qualname": "Generator.distinct_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Distinct) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ignorenulls_sql": {"fullname": "sqlglot.generator.Generator.ignorenulls_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ignorenulls_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.IgnoreNulls) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.respectnulls_sql": {"fullname": "sqlglot.generator.Generator.respectnulls_sql", "modulename": "sqlglot.generator", "qualname": "Generator.respectnulls_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RespectNulls) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.intdiv_sql": {"fullname": "sqlglot.generator.Generator.intdiv_sql", "modulename": "sqlglot.generator", "qualname": "Generator.intdiv_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.IntDiv) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dpipe_sql": {"fullname": "sqlglot.generator.Generator.dpipe_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dpipe_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DPipe) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.div_sql": {"fullname": "sqlglot.generator.Generator.div_sql", "modulename": "sqlglot.generator", "qualname": "Generator.div_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Div) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.overlaps_sql": {"fullname": "sqlglot.generator.Generator.overlaps_sql", "modulename": "sqlglot.generator", "qualname": "Generator.overlaps_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Overlaps) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.distance_sql": {"fullname": "sqlglot.generator.Generator.distance_sql", "modulename": "sqlglot.generator", "qualname": "Generator.distance_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Distance) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dot_sql": {"fullname": "sqlglot.generator.Generator.dot_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dot_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Dot) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.eq_sql": {"fullname": "sqlglot.generator.Generator.eq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.eq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.EQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.escape_sql": {"fullname": "sqlglot.generator.Generator.escape_sql", "modulename": "sqlglot.generator", "qualname": "Generator.escape_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Escape) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.glob_sql": {"fullname": "sqlglot.generator.Generator.glob_sql", "modulename": "sqlglot.generator", "qualname": "Generator.glob_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Glob) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.gt_sql": {"fullname": "sqlglot.generator.Generator.gt_sql", "modulename": "sqlglot.generator", "qualname": "Generator.gt_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.GT) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.gte_sql": {"fullname": "sqlglot.generator.Generator.gte_sql", "modulename": "sqlglot.generator", "qualname": "Generator.gte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.GTE) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ilike_sql": {"fullname": "sqlglot.generator.Generator.ilike_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ilike_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ILike) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ilikeany_sql": {"fullname": "sqlglot.generator.Generator.ilikeany_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ilikeany_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ILikeAny) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.is_sql": {"fullname": "sqlglot.generator.Generator.is_sql", "modulename": "sqlglot.generator", "qualname": "Generator.is_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Is) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.like_sql": {"fullname": "sqlglot.generator.Generator.like_sql", "modulename": "sqlglot.generator", "qualname": "Generator.like_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Like) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.likeany_sql": {"fullname": "sqlglot.generator.Generator.likeany_sql", "modulename": "sqlglot.generator", "qualname": "Generator.likeany_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LikeAny) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.similarto_sql": {"fullname": "sqlglot.generator.Generator.similarto_sql", "modulename": "sqlglot.generator", "qualname": "Generator.similarto_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SimilarTo) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lt_sql": {"fullname": "sqlglot.generator.Generator.lt_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lt_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LT) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lte_sql": {"fullname": "sqlglot.generator.Generator.lte_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LTE) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mod_sql": {"fullname": "sqlglot.generator.Generator.mod_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mod_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Mod) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mul_sql": {"fullname": "sqlglot.generator.Generator.mul_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mul_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Mul) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.neq_sql": {"fullname": "sqlglot.generator.Generator.neq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.neq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NEQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.nullsafeeq_sql": {"fullname": "sqlglot.generator.Generator.nullsafeeq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.nullsafeeq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NullSafeEQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.nullsafeneq_sql": {"fullname": "sqlglot.generator.Generator.nullsafeneq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.nullsafeneq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NullSafeNEQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.or_sql": {"fullname": "sqlglot.generator.Generator.or_sql", "modulename": "sqlglot.generator", "qualname": "Generator.or_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Or) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.slice_sql": {"fullname": "sqlglot.generator.Generator.slice_sql", "modulename": "sqlglot.generator", "qualname": "Generator.slice_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Slice) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sub_sql": {"fullname": "sqlglot.generator.Generator.sub_sql", "modulename": "sqlglot.generator", "qualname": "Generator.sub_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Sub) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.trycast_sql": {"fullname": "sqlglot.generator.Generator.trycast_sql", "modulename": "sqlglot.generator", "qualname": "Generator.trycast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.TryCast) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.use_sql": {"fullname": "sqlglot.generator.Generator.use_sql", "modulename": "sqlglot.generator", "qualname": "Generator.use_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Use) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.binary": {"fullname": "sqlglot.generator.Generator.binary", "modulename": "sqlglot.generator", "qualname": "Generator.binary", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Binary, op: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.function_fallback_sql": {"fullname": "sqlglot.generator.Generator.function_fallback_sql", "modulename": "sqlglot.generator", "qualname": "Generator.function_fallback_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Func) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.func": {"fullname": "sqlglot.generator.Generator.func", "modulename": "sqlglot.generator", "qualname": "Generator.func", "kind": "function", "doc": "

\n", "signature": "(\tself,\tname: str,\t*args: Union[str, sqlglot.expressions.Expression, NoneType]) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.format_args": {"fullname": "sqlglot.generator.Generator.format_args", "modulename": "sqlglot.generator", "qualname": "Generator.format_args", "kind": "function", "doc": "

\n", "signature": "(self, *args: Union[str, sqlglot.expressions.Expression, NoneType]) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.text_width": {"fullname": "sqlglot.generator.Generator.text_width", "modulename": "sqlglot.generator", "qualname": "Generator.text_width", "kind": "function", "doc": "

\n", "signature": "(self, args: Iterable) -> int:", "funcdef": "def"}, "sqlglot.generator.Generator.format_time": {"fullname": "sqlglot.generator.Generator.format_time", "modulename": "sqlglot.generator", "qualname": "Generator.format_time", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> Optional[str]:", "funcdef": "def"}, "sqlglot.generator.Generator.expressions": {"fullname": "sqlglot.generator.Generator.expressions", "modulename": "sqlglot.generator", "qualname": "Generator.expressions", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: Optional[sqlglot.expressions.Expression] = None,\tkey: Optional[str] = None,\tsqls: Optional[List[str]] = None,\tflat: bool = False,\tindent: bool = True,\tsep: str = ', ',\tprefix: str = '') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.op_expressions": {"fullname": "sqlglot.generator.Generator.op_expressions", "modulename": "sqlglot.generator", "qualname": "Generator.op_expressions", "kind": "function", "doc": "

\n", "signature": "(\tself,\top: str,\texpression: sqlglot.expressions.Expression,\tflat: bool = False) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.naked_property": {"fullname": "sqlglot.generator.Generator.naked_property", "modulename": "sqlglot.generator", "qualname": "Generator.naked_property", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Property) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.set_operation": {"fullname": "sqlglot.generator.Generator.set_operation", "modulename": "sqlglot.generator", "qualname": "Generator.set_operation", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression, op: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tag_sql": {"fullname": "sqlglot.generator.Generator.tag_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tag_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Tag) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.token_sql": {"fullname": "sqlglot.generator.Generator.token_sql", "modulename": "sqlglot.generator", "qualname": "Generator.token_sql", "kind": "function", "doc": "

\n", "signature": "(self, token_type: sqlglot.tokens.TokenType) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"fullname": "sqlglot.generator.Generator.userdefinedfunction_sql", "modulename": "sqlglot.generator", "qualname": "Generator.userdefinedfunction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.UserDefinedFunction) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.joinhint_sql": {"fullname": "sqlglot.generator.Generator.joinhint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.joinhint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.JoinHint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.kwarg_sql": {"fullname": "sqlglot.generator.Generator.kwarg_sql", "modulename": "sqlglot.generator", "qualname": "Generator.kwarg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Kwarg) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.when_sql": {"fullname": "sqlglot.generator.Generator.when_sql", "modulename": "sqlglot.generator", "qualname": "Generator.when_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.When) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.merge_sql": {"fullname": "sqlglot.generator.Generator.merge_sql", "modulename": "sqlglot.generator", "qualname": "Generator.merge_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Merge) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tochar_sql": {"fullname": "sqlglot.generator.Generator.tochar_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tochar_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ToChar) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dictproperty_sql": {"fullname": "sqlglot.generator.Generator.dictproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dictproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DictProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dictrange_sql": {"fullname": "sqlglot.generator.Generator.dictrange_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dictrange_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DictRange) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dictsubproperty_sql": {"fullname": "sqlglot.generator.Generator.dictsubproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dictsubproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DictSubProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.cached_generator": {"fullname": "sqlglot.generator.cached_generator", "modulename": "sqlglot.generator", "qualname": "cached_generator", "kind": "function", "doc": "

Returns a cached generator.

\n", "signature": "(\tcache: Optional[Dict[int, str]] = None) -> Callable[[sqlglot.expressions.Expression], str]:", "funcdef": "def"}, "sqlglot.helper": {"fullname": "sqlglot.helper", "modulename": "sqlglot.helper", "kind": "module", "doc": "

\n"}, "sqlglot.helper.AutoName": {"fullname": "sqlglot.helper.AutoName", "modulename": "sqlglot.helper", "qualname": "AutoName", "kind": "class", "doc": "

This is used for creating enum classes where auto() is the string form of the corresponding value's name.

\n", "bases": "enum.Enum"}, "sqlglot.helper.seq_get": {"fullname": "sqlglot.helper.seq_get", "modulename": "sqlglot.helper", "qualname": "seq_get", "kind": "function", "doc": "

Returns the value in seq at position index, or None if index is out of bounds.

\n", "signature": "(seq: Sequence[~T], index: int) -> Optional[~T]:", "funcdef": "def"}, "sqlglot.helper.ensure_list": {"fullname": "sqlglot.helper.ensure_list", "modulename": "sqlglot.helper", "qualname": "ensure_list", "kind": "function", "doc": "

Ensures that a value is a list, otherwise casts or wraps it into one.

\n\n
Arguments:
\n\n
    \n
  • value: the value of interest.
  • \n
\n\n
Returns:
\n\n
\n

The value cast as a list if it's a list or a tuple, or else the value wrapped in a list.

\n
\n", "signature": "(value):", "funcdef": "def"}, "sqlglot.helper.ensure_collection": {"fullname": "sqlglot.helper.ensure_collection", "modulename": "sqlglot.helper", "qualname": "ensure_collection", "kind": "function", "doc": "

Ensures that a value is a collection (excluding str and bytes), otherwise wraps it into a list.

\n\n
Arguments:
\n\n
    \n
  • value: the value of interest.
  • \n
\n\n
Returns:
\n\n
\n

The value if it's a collection, or else the value wrapped in a list.

\n
\n", "signature": "(value):", "funcdef": "def"}, "sqlglot.helper.csv": {"fullname": "sqlglot.helper.csv", "modulename": "sqlglot.helper", "qualname": "csv", "kind": "function", "doc": "

Formats any number of string arguments as CSV.

\n\n
Arguments:
\n\n
    \n
  • args: the string arguments to format.
  • \n
  • sep: the argument separator.
  • \n
\n\n
Returns:
\n\n
\n

The arguments formatted as a CSV string.

\n
\n", "signature": "(*args: str, sep: str = ', ') -> str:", "funcdef": "def"}, "sqlglot.helper.subclasses": {"fullname": "sqlglot.helper.subclasses", "modulename": "sqlglot.helper", "qualname": "subclasses", "kind": "function", "doc": "

Returns all subclasses for a collection of classes, possibly excluding some of them.

\n\n
Arguments:
\n\n
    \n
  • module_name: the name of the module to search for subclasses in.
  • \n
  • classes: class(es) we want to find the subclasses of.
  • \n
  • exclude: class(es) we want to exclude from the returned list.
  • \n
\n\n
Returns:
\n\n
\n

The target subclasses.

\n
\n", "signature": "(\tmodule_name: str,\tclasses: Union[Type, Tuple[Type, ...]],\texclude: Union[Type, Tuple[Type, ...]] = ()) -> List[Type]:", "funcdef": "def"}, "sqlglot.helper.apply_index_offset": {"fullname": "sqlglot.helper.apply_index_offset", "modulename": "sqlglot.helper", "qualname": "apply_index_offset", "kind": "function", "doc": "

Applies an offset to a given integer literal expression.

\n\n
Arguments:
\n\n
    \n
  • this: the target of the index
  • \n
  • expressions: the expression the offset will be applied to, wrapped in a list.
  • \n
  • offset: the offset that will be applied.
  • \n
\n\n
Returns:
\n\n
\n

The original expression with the offset applied to it, wrapped in a list. If the provided\n expressions argument contains more than one expression, it's returned unaffected.

\n
\n", "signature": "(\tthis: sqlglot.expressions.Expression,\texpressions: List[Optional[~E]],\toffset: int) -> List[Optional[~E]]:", "funcdef": "def"}, "sqlglot.helper.camel_to_snake_case": {"fullname": "sqlglot.helper.camel_to_snake_case", "modulename": "sqlglot.helper", "qualname": "camel_to_snake_case", "kind": "function", "doc": "

Converts name from camelCase to snake_case and returns the result.

\n", "signature": "(name: str) -> str:", "funcdef": "def"}, "sqlglot.helper.while_changing": {"fullname": "sqlglot.helper.while_changing", "modulename": "sqlglot.helper", "qualname": "while_changing", "kind": "function", "doc": "

Applies a transformation to a given expression until a fix point is reached.

\n\n
Arguments:
\n\n
    \n
  • expression: the expression to be transformed.
  • \n
  • func: the transformation to be applied.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tfunc: Callable[[sqlglot.expressions.Expression], ~E]) -> ~E:", "funcdef": "def"}, "sqlglot.helper.tsort": {"fullname": "sqlglot.helper.tsort", "modulename": "sqlglot.helper", "qualname": "tsort", "kind": "function", "doc": "

Sorts a given directed acyclic graph in topological order.

\n\n
Arguments:
\n\n
    \n
  • dag: the graph to be sorted.
  • \n
\n\n
Returns:
\n\n
\n

A list that contains all of the graph's nodes in topological order.

\n
\n", "signature": "(dag: Dict[~T, List[~T]]) -> List[~T]:", "funcdef": "def"}, "sqlglot.helper.open_file": {"fullname": "sqlglot.helper.open_file", "modulename": "sqlglot.helper", "qualname": "open_file", "kind": "function", "doc": "

Open a file that may be compressed as gzip and return it in universal newline mode.

\n", "signature": "(file_name: str) -> <class 'TextIO'>:", "funcdef": "def"}, "sqlglot.helper.csv_reader": {"fullname": "sqlglot.helper.csv_reader", "modulename": "sqlglot.helper", "qualname": "csv_reader", "kind": "function", "doc": "

Returns a csv reader given the expression READ_CSV(name, ['delimiter', '|', ...]).

\n\n
Arguments:
\n\n
    \n
  • read_csv: a ReadCSV function call
  • \n
\n\n
Yields:
\n\n
\n

A python csv reader.

\n
\n", "signature": "(read_csv: sqlglot.expressions.ReadCSV) -> Any:", "funcdef": "def"}, "sqlglot.helper.find_new_name": {"fullname": "sqlglot.helper.find_new_name", "modulename": "sqlglot.helper", "qualname": "find_new_name", "kind": "function", "doc": "

Searches for a new name.

\n\n
Arguments:
\n\n
    \n
  • taken: a collection of taken names.
  • \n
  • base: base name to alter.
  • \n
\n\n
Returns:
\n\n
\n

The new, available name.

\n
\n", "signature": "(taken: Collection[str], base: str) -> str:", "funcdef": "def"}, "sqlglot.helper.name_sequence": {"fullname": "sqlglot.helper.name_sequence", "modulename": "sqlglot.helper", "qualname": "name_sequence", "kind": "function", "doc": "

Returns a name generator given a prefix (e.g. a0, a1, a2, ... if the prefix is \"a\").

\n", "signature": "(prefix: str) -> Callable[[], str]:", "funcdef": "def"}, "sqlglot.helper.object_to_dict": {"fullname": "sqlglot.helper.object_to_dict", "modulename": "sqlglot.helper", "qualname": "object_to_dict", "kind": "function", "doc": "

Returns a dictionary created from an object's attributes.

\n", "signature": "(obj: Any, **kwargs) -> Dict:", "funcdef": "def"}, "sqlglot.helper.split_num_words": {"fullname": "sqlglot.helper.split_num_words", "modulename": "sqlglot.helper", "qualname": "split_num_words", "kind": "function", "doc": "

Perform a split on a value and return N words as a result with None used for words that don't exist.

\n\n
Arguments:
\n\n
    \n
  • value: the value to be split.
  • \n
  • sep: the value to use to split on.
  • \n
  • min_num_words: the minimum number of words that are going to be in the result.
  • \n
  • fill_from_start: indicates whether None values should be inserted at the start or end of the list.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> split_num_words("db.table", ".", 3)\n[None, 'db', 'table']\n>>> split_num_words("db.table", ".", 3, fill_from_start=False)\n['db', 'table', None]\n>>> split_num_words("db.table", ".", 1)\n['db', 'table']\n
\n
\n
\n\n
Returns:
\n\n
\n

The list of words returned by split, possibly augmented by a number of None values.

\n
\n", "signature": "(\tvalue: str,\tsep: str,\tmin_num_words: int,\tfill_from_start: bool = True) -> List[Optional[str]]:", "funcdef": "def"}, "sqlglot.helper.is_iterable": {"fullname": "sqlglot.helper.is_iterable", "modulename": "sqlglot.helper", "qualname": "is_iterable", "kind": "function", "doc": "

Checks if the value is an iterable, excluding the types str and bytes.

\n\n
Examples:
\n\n
\n
\n
>>> is_iterable([1,2])\nTrue\n>>> is_iterable("test")\nFalse\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • value: the value to check if it is an iterable.
  • \n
\n\n
Returns:
\n\n
\n

A bool value indicating if it is an iterable.

\n
\n", "signature": "(value: Any) -> bool:", "funcdef": "def"}, "sqlglot.helper.flatten": {"fullname": "sqlglot.helper.flatten", "modulename": "sqlglot.helper", "qualname": "flatten", "kind": "function", "doc": "

Flattens an iterable that can contain both iterable and non-iterable elements. Objects of\ntype str and bytes are not regarded as iterables.

\n\n
Examples:
\n\n
\n
\n
>>> list(flatten([[1, 2], 3, {4}, (5, "bla")]))\n[1, 2, 3, 4, 5, 'bla']\n>>> list(flatten([1, 2, 3]))\n[1, 2, 3]\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • values: the value to be flattened.
  • \n
\n\n
Yields:
\n\n
\n

Non-iterable elements in values.

\n
\n", "signature": "(values: Iterable[Union[Iterable[Any], Any]]) -> Iterator[Any]:", "funcdef": "def"}, "sqlglot.helper.dict_depth": {"fullname": "sqlglot.helper.dict_depth", "modulename": "sqlglot.helper", "qualname": "dict_depth", "kind": "function", "doc": "

Get the nesting depth of a dictionary.

\n\n
For example:
\n\n
\n
\n
>>> dict_depth(None)\n0\n>>> dict_depth({})\n1\n>>> dict_depth({"a": "b"})\n1\n>>> dict_depth({"a": {}})\n2\n>>> dict_depth({"a": {"b": {}}})\n3\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • d (dict): dictionary
  • \n
\n\n
Returns:
\n\n
\n

int: depth

\n
\n", "signature": "(d: Dict) -> int:", "funcdef": "def"}, "sqlglot.helper.first": {"fullname": "sqlglot.helper.first", "modulename": "sqlglot.helper", "qualname": "first", "kind": "function", "doc": "

Returns the first element from an iterable.

\n\n

Useful for sets.

\n", "signature": "(it: Iterable[~T]) -> ~T:", "funcdef": "def"}, "sqlglot.helper.case_sensitive": {"fullname": "sqlglot.helper.case_sensitive", "modulename": "sqlglot.helper", "qualname": "case_sensitive", "kind": "function", "doc": "

Checks if text contains any case-sensitive characters, depending on the dialect.

\n", "signature": "(\ttext: str,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType]) -> bool:", "funcdef": "def"}, "sqlglot.helper.should_identify": {"fullname": "sqlglot.helper.should_identify", "modulename": "sqlglot.helper", "qualname": "should_identify", "kind": "function", "doc": "

Checks if text should be identified given an identify option.

\n\n
Arguments:
\n\n
    \n
  • text: the text to check.
  • \n
  • identify: \"always\" or True: always returns true.\n\"safe\": true if there is no uppercase or lowercase character in text, depending on dialect.
  • \n
  • dialect: the dialect to use in order to decide whether a text should be identified.
  • \n
\n\n
Returns:
\n\n
\n

Whether or not a string should be identified.

\n
\n", "signature": "(\ttext: str,\tidentify: str | bool,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> bool:", "funcdef": "def"}, "sqlglot.lineage": {"fullname": "sqlglot.lineage", "modulename": "sqlglot.lineage", "kind": "module", "doc": "

\n"}, "sqlglot.lineage.Node": {"fullname": "sqlglot.lineage.Node", "modulename": "sqlglot.lineage", "qualname": "Node", "kind": "class", "doc": "

\n"}, "sqlglot.lineage.Node.__init__": {"fullname": "sqlglot.lineage.Node.__init__", "modulename": "sqlglot.lineage", "qualname": "Node.__init__", "kind": "function", "doc": "

\n", "signature": "(\tname: str,\texpression: sqlglot.expressions.Expression,\tsource: sqlglot.expressions.Expression,\tdownstream: List[sqlglot.lineage.Node] = <factory>,\talias: str = '')"}, "sqlglot.lineage.Node.walk": {"fullname": "sqlglot.lineage.Node.walk", "modulename": "sqlglot.lineage", "qualname": "Node.walk", "kind": "function", "doc": "

\n", "signature": "(self) -> Iterator[sqlglot.lineage.Node]:", "funcdef": "def"}, "sqlglot.lineage.Node.to_html": {"fullname": "sqlglot.lineage.Node.to_html", "modulename": "sqlglot.lineage", "qualname": "Node.to_html", "kind": "function", "doc": "

\n", "signature": "(self, **opts) -> sqlglot.lineage.LineageHTML:", "funcdef": "def"}, "sqlglot.lineage.lineage": {"fullname": "sqlglot.lineage.lineage", "modulename": "sqlglot.lineage", "qualname": "lineage", "kind": "function", "doc": "

Build the lineage graph for a column of a SQL query.

\n\n
Arguments:
\n\n
    \n
  • column: The column to build the lineage for.
  • \n
  • sql: The SQL string or expression.
  • \n
  • schema: The schema of tables.
  • \n
  • sources: A mapping of queries which will be used to continue building lineage.
  • \n
  • dialect: The dialect of input SQL.
  • \n
  • **kwargs: Qualification optimizer kwargs.
  • \n
\n\n
Returns:
\n\n
\n

A lineage node.

\n
\n", "signature": "(\tcolumn: str | sqlglot.expressions.Column,\tsql: str | sqlglot.expressions.Expression,\tschema: Union[Dict, sqlglot.schema.Schema, NoneType] = None,\tsources: Optional[Dict[str, str | sqlglot.expressions.Subqueryable]] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**kwargs) -> sqlglot.lineage.Node:", "funcdef": "def"}, "sqlglot.lineage.LineageHTML": {"fullname": "sqlglot.lineage.LineageHTML", "modulename": "sqlglot.lineage", "qualname": "LineageHTML", "kind": "class", "doc": "

Node to HTML generator using vis.js.

\n\n

https://visjs.github.io/vis-network/docs/network/

\n"}, "sqlglot.lineage.LineageHTML.__init__": {"fullname": "sqlglot.lineage.LineageHTML.__init__", "modulename": "sqlglot.lineage", "qualname": "LineageHTML.__init__", "kind": "function", "doc": "

\n", "signature": "(\tnode: sqlglot.lineage.Node,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\timports: bool = True,\t**opts: Any)"}, "sqlglot.optimizer": {"fullname": "sqlglot.optimizer", "modulename": "sqlglot.optimizer", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.annotate_types": {"fullname": "sqlglot.optimizer.annotate_types", "modulename": "sqlglot.optimizer.annotate_types", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.annotate_types.annotate_types": {"fullname": "sqlglot.optimizer.annotate_types.annotate_types", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "annotate_types", "kind": "function", "doc": "

Recursively infer & annotate types in an expression syntax tree against a schema.\nAssumes that we've already executed the optimizer's qualify_columns step.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> schema = {"y": {"cola": "SMALLINT"}}\n>>> sql = "SELECT x.cola + 2.5 AS cola FROM (SELECT y.cola AS cola FROM y AS y) AS x"\n>>> annotated_expr = annotate_types(sqlglot.parse_one(sql), schema=schema)\n>>> annotated_expr.expressions[0].type.this  # Get the type of "x.cola + 2.5 AS cola"\n<Type.DOUBLE: 'DOUBLE'>\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): Expression to annotate.
  • \n
  • schema (dict|sqlglot.optimizer.Schema): Database schema.
  • \n
  • annotators (dict): Maps expression type to corresponding annotation function.
  • \n
  • coerces_to (dict): Maps expression type to set of types that it can be coerced into.
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: expression annotated with types

\n
\n", "signature": "(expression, schema=None, annotators=None, coerces_to=None):", "funcdef": "def"}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"fullname": "sqlglot.optimizer.annotate_types.TypeAnnotator", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "TypeAnnotator", "kind": "class", "doc": "

\n"}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"fullname": "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "TypeAnnotator.__init__", "kind": "function", "doc": "

\n", "signature": "(schema=None, annotators=None, coerces_to=None)"}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"fullname": "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "TypeAnnotator.annotate", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.optimizer.canonicalize": {"fullname": "sqlglot.optimizer.canonicalize", "modulename": "sqlglot.optimizer.canonicalize", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.canonicalize.canonicalize": {"fullname": "sqlglot.optimizer.canonicalize.canonicalize", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "canonicalize", "kind": "function", "doc": "

Converts a SQL expression into a standard form.

\n\n

This method relies on annotate_types because many of the\nconversions rely on type inference.

\n\n
Arguments:
\n\n
    \n
  • expression: The expression to canonicalize.
  • \n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"fullname": "sqlglot.optimizer.canonicalize.add_text_to_concat", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "add_text_to_concat", "kind": "function", "doc": "

\n", "signature": "(node: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.coerce_type": {"fullname": "sqlglot.optimizer.canonicalize.coerce_type", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "coerce_type", "kind": "function", "doc": "

\n", "signature": "(node: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"fullname": "sqlglot.optimizer.canonicalize.remove_redundant_casts", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "remove_redundant_casts", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"fullname": "sqlglot.optimizer.canonicalize.ensure_bool_predicates", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "ensure_bool_predicates", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.eliminate_ctes": {"fullname": "sqlglot.optimizer.eliminate_ctes", "modulename": "sqlglot.optimizer.eliminate_ctes", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"fullname": "sqlglot.optimizer.eliminate_ctes.eliminate_ctes", "modulename": "sqlglot.optimizer.eliminate_ctes", "qualname": "eliminate_ctes", "kind": "function", "doc": "

Remove unused CTEs from an expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "WITH y AS (SELECT a FROM x) SELECT a FROM z"\n>>> expression = sqlglot.parse_one(sql)\n>>> eliminate_ctes(expression).sql()\n'SELECT a FROM z'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.eliminate_joins": {"fullname": "sqlglot.optimizer.eliminate_joins", "modulename": "sqlglot.optimizer.eliminate_joins", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"fullname": "sqlglot.optimizer.eliminate_joins.eliminate_joins", "modulename": "sqlglot.optimizer.eliminate_joins", "qualname": "eliminate_joins", "kind": "function", "doc": "

Remove unused joins from an expression.

\n\n

This only removes joins when we know that the join condition doesn't produce duplicate rows.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "SELECT x.a FROM x LEFT JOIN (SELECT DISTINCT y.b FROM y) AS y ON x.b = y.b"\n>>> expression = sqlglot.parse_one(sql)\n>>> eliminate_joins(expression).sql()\n'SELECT x.a FROM x'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.eliminate_joins.join_condition": {"fullname": "sqlglot.optimizer.eliminate_joins.join_condition", "modulename": "sqlglot.optimizer.eliminate_joins", "qualname": "join_condition", "kind": "function", "doc": "

Extract the join condition from a join expression.

\n\n
Arguments:
\n\n
    \n
  • join (exp.Join)
  • \n
\n\n
Returns:
\n\n
\n

tuple[list[str], list[str], exp.Expression]:\n Tuple of (source key, join key, remaining predicate)

\n
\n", "signature": "(join):", "funcdef": "def"}, "sqlglot.optimizer.eliminate_subqueries": {"fullname": "sqlglot.optimizer.eliminate_subqueries", "modulename": "sqlglot.optimizer.eliminate_subqueries", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"fullname": "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries", "modulename": "sqlglot.optimizer.eliminate_subqueries", "qualname": "eliminate_subqueries", "kind": "function", "doc": "

Rewrite derived tables as CTEs, deduplicating if possible.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y")\n>>> eliminate_subqueries(expression).sql()\n'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y'\n
\n
\n
\n\n
This also deduplicates common subqueries:
\n\n
\n
\n
>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y CROSS JOIN (SELECT * FROM x) AS z")\n>>> eliminate_subqueries(expression).sql()\n'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y CROSS JOIN y AS z'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.isolate_table_selects": {"fullname": "sqlglot.optimizer.isolate_table_selects", "modulename": "sqlglot.optimizer.isolate_table_selects", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"fullname": "sqlglot.optimizer.isolate_table_selects.isolate_table_selects", "modulename": "sqlglot.optimizer.isolate_table_selects", "qualname": "isolate_table_selects", "kind": "function", "doc": "

\n", "signature": "(expression, schema=None):", "funcdef": "def"}, "sqlglot.optimizer.merge_subqueries": {"fullname": "sqlglot.optimizer.merge_subqueries", "modulename": "sqlglot.optimizer.merge_subqueries", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"fullname": "sqlglot.optimizer.merge_subqueries.merge_subqueries", "modulename": "sqlglot.optimizer.merge_subqueries", "qualname": "merge_subqueries", "kind": "function", "doc": "

Rewrite sqlglot AST to merge derived tables into the outer query.

\n\n

This also merges CTEs if they are selected from only once.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) CROSS JOIN y")\n>>> merge_subqueries(expression).sql()\n'SELECT x.a FROM x CROSS JOIN y'\n
\n
\n
\n\n

If leave_tables_isolated is True, this will not merge inner queries into outer\nqueries if it would result in multiple table selects in a single query:

\n\n
\n
\n
\n

expression = sqlglot.parse_one(\"SELECT a FROM (SELECT x.a FROM x) CROSS JOIN y\")\n merge_subqueries(expression, leave_tables_isolated=True).sql()\n 'SELECT a FROM (SELECT x.a FROM x) CROSS JOIN y'

\n
\n
\n
\n\n

Inspired by https://dev.mysql.com/doc/refman/8.0/en/derived-table-optimization.html

\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
  • leave_tables_isolated (bool):
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression, leave_tables_isolated=False):", "funcdef": "def"}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"fullname": "sqlglot.optimizer.merge_subqueries.merge_ctes", "modulename": "sqlglot.optimizer.merge_subqueries", "qualname": "merge_ctes", "kind": "function", "doc": "

\n", "signature": "(expression, leave_tables_isolated=False):", "funcdef": "def"}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"fullname": "sqlglot.optimizer.merge_subqueries.merge_derived_tables", "modulename": "sqlglot.optimizer.merge_subqueries", "qualname": "merge_derived_tables", "kind": "function", "doc": "

\n", "signature": "(expression, leave_tables_isolated=False):", "funcdef": "def"}, "sqlglot.optimizer.normalize": {"fullname": "sqlglot.optimizer.normalize", "modulename": "sqlglot.optimizer.normalize", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.normalize.normalize": {"fullname": "sqlglot.optimizer.normalize.normalize", "modulename": "sqlglot.optimizer.normalize", "qualname": "normalize", "kind": "function", "doc": "

Rewrite sqlglot AST into conjunctive normal form or disjunctive normal form.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("(x AND y) OR z")\n>>> normalize(expression, dnf=False).sql()\n'(x OR z) AND (y OR z)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: expression to normalize
  • \n
  • dnf: rewrite in disjunctive normal form instead.
  • \n
  • max_distance (int): the maximal estimated distance from cnf/dnf to attempt conversion
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: normalized expression

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tdnf: bool = False,\tmax_distance: int = 128):", "funcdef": "def"}, "sqlglot.optimizer.normalize.normalized": {"fullname": "sqlglot.optimizer.normalize.normalized", "modulename": "sqlglot.optimizer.normalize", "qualname": "normalized", "kind": "function", "doc": "

\n", "signature": "(expression, dnf=False):", "funcdef": "def"}, "sqlglot.optimizer.normalize.normalization_distance": {"fullname": "sqlglot.optimizer.normalize.normalization_distance", "modulename": "sqlglot.optimizer.normalize", "qualname": "normalization_distance", "kind": "function", "doc": "

The difference in the number of predicates between the current expression and the normalized form.

\n\n

This is used as an estimate of the cost of the conversion which is exponential in complexity.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("(a AND b) OR (c AND d)")\n>>> normalization_distance(expression)\n4\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to compute distance
  • \n
  • dnf (bool): compute to dnf distance instead
  • \n
\n\n
Returns:
\n\n
\n

int: difference

\n
\n", "signature": "(expression, dnf=False):", "funcdef": "def"}, "sqlglot.optimizer.normalize.distributive_law": {"fullname": "sqlglot.optimizer.normalize.distributive_law", "modulename": "sqlglot.optimizer.normalize", "qualname": "distributive_law", "kind": "function", "doc": "

x OR (y AND z) -> (x OR y) AND (x OR z)\n(x AND y) OR (y AND z) -> (x OR y) AND (x OR z) AND (y OR y) AND (y OR z)

\n", "signature": "(expression, dnf, max_distance, generate):", "funcdef": "def"}, "sqlglot.optimizer.normalize_identifiers": {"fullname": "sqlglot.optimizer.normalize_identifiers", "modulename": "sqlglot.optimizer.normalize_identifiers", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"fullname": "sqlglot.optimizer.normalize_identifiers.normalize_identifiers", "modulename": "sqlglot.optimizer.normalize_identifiers", "qualname": "normalize_identifiers", "kind": "function", "doc": "

Normalize all unquoted identifiers to either lower or upper case, depending on\nthe dialect. This essentially makes those identifiers case-insensitive.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one('SELECT Bar.A AS A FROM "Foo".Bar')\n>>> normalize_identifiers(expression).sql()\n'SELECT bar.a AS a FROM "Foo".bar'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: The expression to transform.
  • \n
  • dialect: The dialect to use in order to decide how to normalize identifiers.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: ~E,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> ~E:", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins": {"fullname": "sqlglot.optimizer.optimize_joins", "modulename": "sqlglot.optimizer.optimize_joins", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"fullname": "sqlglot.optimizer.optimize_joins.optimize_joins", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "optimize_joins", "kind": "function", "doc": "

Removes cross joins if possible and reorders joins based on predicate dependencies.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> optimize_joins(parse_one("SELECT * FROM x CROSS JOIN y JOIN z ON x.a = z.a AND y.a = z.a")).sql()\n'SELECT * FROM x JOIN z ON x.a = z.a AND TRUE JOIN y ON y.a = z.a'\n
\n
\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"fullname": "sqlglot.optimizer.optimize_joins.reorder_joins", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "reorder_joins", "kind": "function", "doc": "

Reorder joins by topological sort order based on predicate references.

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins.normalize": {"fullname": "sqlglot.optimizer.optimize_joins.normalize", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "normalize", "kind": "function", "doc": "

Remove INNER and OUTER from joins as they are optional.

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins.other_table_names": {"fullname": "sqlglot.optimizer.optimize_joins.other_table_names", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "other_table_names", "kind": "function", "doc": "

\n", "signature": "(join, exclude):", "funcdef": "def"}, "sqlglot.optimizer.optimizer": {"fullname": "sqlglot.optimizer.optimizer", "modulename": "sqlglot.optimizer.optimizer", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.optimizer.optimize": {"fullname": "sqlglot.optimizer.optimizer.optimize", "modulename": "sqlglot.optimizer.optimizer", "qualname": "optimize", "kind": "function", "doc": "

Rewrite a sqlglot AST into an optimized form.

\n\n
Arguments:
\n\n
    \n
  • expression: expression to optimize
  • \n
  • schema: database schema.\nThis can either be an instance of sqlglot.schema.Schema or a mapping in one of\nthe following forms:\n 1. {table: {col: type}}\n 2. {db: {table: {col: type}}}\n 3. {catalog: {db: {table: {col: type}}}}\nIf no schema is provided then the default schema defined at sqlglot.schema will be used (a usage sketch follows the Returns section below)
  • \n
  • db: specify the default database, as might be set by a USE DATABASE db statement
  • \n
  • catalog: specify the default catalog, as might be set by a USE CATALOG c statement
  • \n
  • dialect: The dialect to parse the sql string.
  • \n
  • rules: sequence of optimizer rules to use.\nMany of the rules require tables and columns to be qualified.\nDo not remove qualify from the sequence of rules unless you know what you're doing!
  • \n
  • **kwargs: If a rule has a keyword argument with the same name in **kwargs, it will be passed in.
  • \n
\n\n
Returns:
\n\n
\n

The optimized expression.

\n
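A minimal usage sketch, assuming the nested-dict schema form described above; the exact identifier quoting in the output depends on the rules and dialect in effect.

    >>> import sqlglot
    >>> from sqlglot.optimizer import optimize
    >>> schema = {"db": {"tbl": {"a": "INT", "b": "INT"}}}
    >>> optimize("SELECT a FROM db.tbl", schema=schema).sql()  # roughly 'SELECT "tbl"."a" AS "a" FROM "db"."tbl" AS "tbl"'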
\n", "signature": "(\texpression: str | sqlglot.expressions.Expression,\tschema: Union[dict, sqlglot.schema.Schema, NoneType] = None,\tdb: Optional[str] = None,\tcatalog: Optional[str] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\trules: Sequence[Callable] = (<function qualify at 0x7fe11813a560>, <function pushdown_projections at 0x7fe118139f30>, <function normalize at 0x7fe11814a0e0>, <function unnest_subqueries at 0x7fe11813a950>, <function pushdown_predicates at 0x7fe118138e50>, <function optimize_joins at 0x7fe118138af0>, <function eliminate_subqueries at 0x7fe11814bd90>, <function merge_subqueries at 0x7fe11814be20>, <function eliminate_joins at 0x7fe118149f30>, <function eliminate_ctes at 0x7fe118149e10>, <function quote_identifiers at 0x7fe118139ea0>, <function annotate_types at 0x7fe11818a560>, <function canonicalize at 0x7fe118149870>, <function simplify at 0x7fe11814a290>),\t**kwargs) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates": {"fullname": "sqlglot.optimizer.pushdown_predicates", "modulename": "sqlglot.optimizer.pushdown_predicates", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown_predicates", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown_predicates", "kind": "function", "doc": "

Rewrite sqlglot AST to push down predicates into FROM and JOIN clauses.

\n\n
Example:
    >>> import sqlglot
    >>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a FROM x AS x) AS y WHERE y.a = 1"
    >>> expression = sqlglot.parse_one(sql)
    >>> pushdown_predicates(expression).sql()
    'SELECT y.a AS a FROM (SELECT x.a AS a FROM x AS x WHERE x.a = 1) AS y WHERE TRUE'

Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown", "kind": "function", "doc": "

\n", "signature": "(condition, sources, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown_cnf", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown_cnf", "kind": "function", "doc": "

If the predicates are in CNF like form, we can simply replace each block in the parent.

\n", "signature": "(predicates, scope, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown_dnf", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown_dnf", "kind": "function", "doc": "

If the predicates are in DNF form, we can only push down conditions that are in all blocks.\nAdditionally, we can't remove predicates from their original form.

\n", "signature": "(predicates, scope, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"fullname": "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "nodes_for_predicate", "kind": "function", "doc": "

\n", "signature": "(predicate, sources, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"fullname": "sqlglot.optimizer.pushdown_predicates.replace_aliases", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "replace_aliases", "kind": "function", "doc": "

\n", "signature": "(source, predicate):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_projections": {"fullname": "sqlglot.optimizer.pushdown_projections", "modulename": "sqlglot.optimizer.pushdown_projections", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"fullname": "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION", "modulename": "sqlglot.optimizer.pushdown_projections", "qualname": "DEFAULT_SELECTION", "kind": "function", "doc": "

\n", "signature": "():", "funcdef": "def"}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"fullname": "sqlglot.optimizer.pushdown_projections.pushdown_projections", "modulename": "sqlglot.optimizer.pushdown_projections", "qualname": "pushdown_projections", "kind": "function", "doc": "

Rewrite sqlglot AST to remove unused column projections.

\n\n
Example:
    >>> import sqlglot
    >>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a, x.b AS b FROM x) AS y"
    >>> expression = sqlglot.parse_one(sql)
    >>> pushdown_projections(expression).sql()
    'SELECT y.a AS a FROM (SELECT x.a AS a FROM x) AS y'

Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
  • remove_unused_selections (bool): remove selects that are unused
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression, schema=None, remove_unused_selections=True):", "funcdef": "def"}, "sqlglot.optimizer.qualify": {"fullname": "sqlglot.optimizer.qualify", "modulename": "sqlglot.optimizer.qualify", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.qualify.qualify": {"fullname": "sqlglot.optimizer.qualify.qualify", "modulename": "sqlglot.optimizer.qualify", "qualname": "qualify", "kind": "function", "doc": "

Rewrite sqlglot AST to have normalized and qualified tables and columns.

\n\n

This step is necessary for all further SQLGlot optimizations.

\n\n
Example:
    >>> import sqlglot
    >>> schema = {"tbl": {"col": "INT"}}
    >>> expression = sqlglot.parse_one("SELECT col FROM tbl")
    >>> qualify(expression, schema=schema).sql()
    'SELECT "tbl"."col" AS "col" FROM "tbl" AS "tbl"'

Arguments:
\n\n
    \n
  • expression: Expression to qualify.
  • \n
  • db: Default database name for tables.
  • \n
  • catalog: Default catalog name for tables.
  • \n
  • schema: Schema to infer column names and types.
  • \n
  • expand_alias_refs: Whether or not to expand references to aliases.
  • \n
  • infer_schema: Whether or not to infer the schema if missing.
  • \n
  • isolate_tables: Whether or not to isolate table selects.
  • \n
  • qualify_columns: Whether or not to qualify columns.
  • \n
  • validate_qualify_columns: Whether or not to validate columns.
  • \n
  • quote_identifiers: Whether or not to run the quote_identifiers step.\nThis step is necessary to ensure correctness for case sensitive queries.\nBut this flag is provided in case this step is performed at a later time.
  • \n
  • identify: If True, quote all identifiers, else only necessary ones.
  • \n
\n\n
Returns:
\n\n
\n

The qualified expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tdb: Optional[str] = None,\tcatalog: Optional[str] = None,\tschema: Union[dict, sqlglot.schema.Schema, NoneType] = None,\texpand_alias_refs: bool = True,\tinfer_schema: Optional[bool] = None,\tisolate_tables: bool = False,\tqualify_columns: bool = True,\tvalidate_qualify_columns: bool = True,\tquote_identifiers: bool = True,\tidentify: bool = True) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns": {"fullname": "sqlglot.optimizer.qualify_columns", "modulename": "sqlglot.optimizer.qualify_columns", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"fullname": "sqlglot.optimizer.qualify_columns.qualify_columns", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "qualify_columns", "kind": "function", "doc": "

Rewrite sqlglot AST to have fully qualified columns.

\n\n
Example:
    >>> import sqlglot
    >>> schema = {"tbl": {"col": "INT"}}
    >>> expression = sqlglot.parse_one("SELECT col FROM tbl")
    >>> qualify_columns(expression, schema).sql()
    'SELECT tbl.col AS col FROM tbl'

Arguments:
\n\n
    \n
  • expression: expression to qualify
  • \n
  • schema: Database schema
  • \n
  • expand_alias_refs: whether or not to expand references to aliases
  • \n
  • infer_schema: whether or not to infer the schema if missing
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: qualified expression

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tschema: dict | sqlglot.schema.Schema,\texpand_alias_refs: bool = True,\tinfer_schema: Optional[bool] = None) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"fullname": "sqlglot.optimizer.qualify_columns.validate_qualify_columns", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "validate_qualify_columns", "kind": "function", "doc": "

Raise an OptimizeError if any columns aren't qualified

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"fullname": "sqlglot.optimizer.qualify_columns.quote_identifiers", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "quote_identifiers", "kind": "function", "doc": "

Makes sure all identifiers that need to be quoted are quoted.
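A small sketch of the intended behaviour (output is approximate and assumes the default dialect):

    >>> import sqlglot
    >>> from sqlglot.optimizer.qualify_columns import quote_identifiers
    >>> quote_identifiers(sqlglot.parse_one("SELECT x.a FROM x"), identify=True).sql()  # roughly 'SELECT "x"."a" FROM "x"'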

\n", "signature": "(\texpression: ~E,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tidentify: bool = True) -> ~E:", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns.Resolver": {"fullname": "sqlglot.optimizer.qualify_columns.Resolver", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "Resolver", "kind": "class", "doc": "

Helper for resolving columns.

\n\n

This is a class so we can lazily load some things and easily share them across functions.

\n"}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"fullname": "sqlglot.optimizer.qualify_columns.Resolver.__init__", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "Resolver.__init__", "kind": "function", "doc": "

\n", "signature": "(scope, schema, infer_schema: bool = True)"}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"fullname": "sqlglot.optimizer.qualify_columns.Resolver.get_table", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "Resolver.get_table", "kind": "function", "doc": "

Get the table for a column name.

\n\n
Arguments:
\n\n
    \n
  • column_name: The column name to find the table for.
  • \n
\n\n
Returns:
\n\n
\n

The table name if it can be found/inferred.

\n
\n", "signature": "(self, column_name: str) -> Optional[sqlglot.expressions.Identifier]:", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"fullname": "sqlglot.optimizer.qualify_columns.Resolver.all_columns", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "Resolver.all_columns", "kind": "variable", "doc": "

All available columns of all sources in this scope

\n"}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"fullname": "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "Resolver.get_source_columns", "kind": "function", "doc": "

Resolve the source columns for a given source name

\n", "signature": "(self, name, only_visible=False):", "funcdef": "def"}, "sqlglot.optimizer.qualify_tables": {"fullname": "sqlglot.optimizer.qualify_tables", "modulename": "sqlglot.optimizer.qualify_tables", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"fullname": "sqlglot.optimizer.qualify_tables.qualify_tables", "modulename": "sqlglot.optimizer.qualify_tables", "qualname": "qualify_tables", "kind": "function", "doc": "

Rewrite sqlglot AST to have fully qualified tables. Additionally, this\nreplaces \"join constructs\" (*) with equivalent SELECT * subqueries.

\n\n
Examples:
    >>> import sqlglot
    >>> expression = sqlglot.parse_one("SELECT 1 FROM tbl")
    >>> qualify_tables(expression, db="db").sql()
    'SELECT 1 FROM db.tbl AS tbl'
    >>>
    >>> expression = sqlglot.parse_one("SELECT * FROM (tbl1 JOIN tbl2 ON id1 = id2)")
    >>> qualify_tables(expression).sql()
    'SELECT * FROM (SELECT * FROM tbl1 AS tbl1 JOIN tbl2 AS tbl2 ON id1 = id2) AS _q_0'

Arguments:
\n\n
    \n
  • expression: Expression to qualify
  • \n
  • db: Database name
  • \n
  • catalog: Catalog name
  • \n
  • schema: A schema to populate
  • \n
\n\n
Returns:
\n\n
\n

The qualified expression.

\n
\n\n

(*) See section 7.2.1.2 in https://www.postgresql.org/docs/current/queries-table-expressions.html

\n", "signature": "(\texpression: ~E,\tdb: Optional[str] = None,\tcatalog: Optional[str] = None,\tschema: Optional[sqlglot.schema.Schema] = None) -> ~E:", "funcdef": "def"}, "sqlglot.optimizer.scope": {"fullname": "sqlglot.optimizer.scope", "modulename": "sqlglot.optimizer.scope", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.scope.ScopeType": {"fullname": "sqlglot.optimizer.scope.ScopeType", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType", "kind": "class", "doc": "

An enumeration.

\n", "bases": "enum.Enum"}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"fullname": "sqlglot.optimizer.scope.ScopeType.ROOT", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.ROOT", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.ROOT: 1>"}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"fullname": "sqlglot.optimizer.scope.ScopeType.SUBQUERY", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.SUBQUERY", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.SUBQUERY: 2>"}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"fullname": "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.DERIVED_TABLE", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.DERIVED_TABLE: 3>"}, "sqlglot.optimizer.scope.ScopeType.CTE": {"fullname": "sqlglot.optimizer.scope.ScopeType.CTE", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.CTE", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.CTE: 4>"}, "sqlglot.optimizer.scope.ScopeType.UNION": {"fullname": "sqlglot.optimizer.scope.ScopeType.UNION", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.UNION", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.UNION: 5>"}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"fullname": "sqlglot.optimizer.scope.ScopeType.UDTF", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.UDTF", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.UDTF: 6>"}, "sqlglot.optimizer.scope.Scope": {"fullname": "sqlglot.optimizer.scope.Scope", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope", "kind": "class", "doc": "

Selection scope.

\n\n
Attributes:
\n\n
    \n
  • expression (exp.Select|exp.Union): Root expression of this scope
  • \n
  • sources (dict[str, exp.Table|Scope]): Mapping of source name to either\na Table expression or another Scope instance. For example:\n SELECT * FROM x {\"x\": Table(this=\"x\")}\n SELECT * FROM x AS y {\"y\": Table(this=\"x\")}\n SELECT * FROM (SELECT ...) AS y {\"y\": Scope(...)}
  • \n
  • lateral_sources (dict[str, exp.Table|Scope]): Sources from laterals\nFor example:\n SELECT c FROM x LATERAL VIEW EXPLODE (a) AS c;\nThe LATERAL VIEW EXPLODE gets x as a source.
  • \n
  • outer_column_list (list[str]): If this is a derived table or CTE, and the outer query\ndefines a column list for its alias of this scope, this is that list of columns.\nFor example:\n SELECT * FROM (SELECT ...) AS y(col1, col2)\nThe inner query would have [\"col1\", \"col2\"] for its outer_column_list
  • \n
  • parent (Scope): Parent scope
  • \n
  • scope_type (ScopeType): Type of this scope, relative to its parent
  • \n
  • subquery_scopes (list[Scope]): List of all child scopes for subqueries
  • \n
  • cte_scopes (list[Scope]): List of all child scopes for CTEs
  • \n
  • derived_table_scopes (list[Scope]): List of all child scopes for derived_tables
  • \n
  • udtf_scopes (list[Scope]): List of all child scopes for user defined tabular functions
  • \n
  • table_scopes (list[Scope]): derived_table_scopes + udtf_scopes, in the order that they're defined
  • \n
  • union_scopes (list[Scope, Scope]): If this Scope is for a Union expression, this will be\na list of the left and right child scopes.
  • \n
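An illustrative sketch of inspecting these attributes, using build_scope from this module (the source listing mirrors the traverse_scope example further below):

    >>> import sqlglot
    >>> from sqlglot.optimizer.scope import build_scope
    >>> root = build_scope(sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y"))
    >>> list(root.sources)  # the derived table is registered under its alias
    ['y']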
\n"}, "sqlglot.optimizer.scope.Scope.__init__": {"fullname": "sqlglot.optimizer.scope.Scope.__init__", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.__init__", "kind": "function", "doc": "

\n", "signature": "(\texpression,\tsources=None,\touter_column_list=None,\tparent=None,\tscope_type=<ScopeType.ROOT: 1>,\tlateral_sources=None)"}, "sqlglot.optimizer.scope.Scope.clear_cache": {"fullname": "sqlglot.optimizer.scope.Scope.clear_cache", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.clear_cache", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.branch": {"fullname": "sqlglot.optimizer.scope.Scope.branch", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.branch", "kind": "function", "doc": "

Branch from the current scope to a new, inner scope

\n", "signature": "(self, expression, scope_type, chain_sources=None, **kwargs):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.walk": {"fullname": "sqlglot.optimizer.scope.Scope.walk", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.walk", "kind": "function", "doc": "

\n", "signature": "(self, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.find": {"fullname": "sqlglot.optimizer.scope.Scope.find", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.find", "kind": "function", "doc": "

Returns the first node in this scope which matches at least one of the specified types.

\n\n

This does NOT traverse into subscopes.

\n\n
Arguments:
\n\n
    \n
  • expression_types (type): the expression type(s) to match.
  • \n
  • bfs (bool): True to use breadth-first search, False to use depth-first.
  • \n
\n\n
Returns:
\n\n
\n

exp.Expression: the node which matches the criteria or None if no node matching\n the criteria was found.

\n
\n", "signature": "(self, *expression_types, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.find_all": {"fullname": "sqlglot.optimizer.scope.Scope.find_all", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.find_all", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this scope and only yields those that\nmatch at least one of the specified expression types.

\n\n

This does NOT traverse into subscopes.

\n\n
Arguments:
\n\n
    \n
  • expression_types (type): the expression type(s) to match.
  • \n
  • bfs (bool): True to use breadth-first search, False to use depth-first.
  • \n
\n\n
Yields:
\n\n
\n

exp.Expression: nodes

\n
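A short sketch, assuming a scope built with build_scope from this module; the exact ordering of the results is not guaranteed here:

    >>> import sqlglot
    >>> from sqlglot import exp
    >>> from sqlglot.optimizer.scope import build_scope
    >>> scope = build_scope(sqlglot.parse_one("SELECT a, b FROM x"))
    >>> [column.sql() for column in scope.find_all(exp.Column)]  # roughly ['a', 'b']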
\n", "signature": "(self, *expression_types, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.replace": {"fullname": "sqlglot.optimizer.scope.Scope.replace", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.replace", "kind": "function", "doc": "

Replace old with new.

\n\n

This can be used instead of exp.Expression.replace to ensure the Scope is kept up-to-date.

\n\n
Arguments:
\n\n
    \n
  • old (exp.Expression): old node
  • \n
  • new (exp.Expression): new node
  • \n
\n", "signature": "(self, old, new):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.tables": {"fullname": "sqlglot.optimizer.scope.Scope.tables", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.tables", "kind": "variable", "doc": "

List of tables in this scope.

\n\n
Returns:
\n\n
\n

list[exp.Table]: tables

\n
\n"}, "sqlglot.optimizer.scope.Scope.ctes": {"fullname": "sqlglot.optimizer.scope.Scope.ctes", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.ctes", "kind": "variable", "doc": "

List of CTEs in this scope.

\n\n
Returns:
\n\n
\n

list[exp.CTE]: ctes

\n
\n"}, "sqlglot.optimizer.scope.Scope.derived_tables": {"fullname": "sqlglot.optimizer.scope.Scope.derived_tables", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.derived_tables", "kind": "variable", "doc": "

List of derived tables in this scope.

\n\n
For example:
\n\n
\n

SELECT * FROM (SELECT ...) <- that's a derived table

\n
\n\n
Returns:
\n\n
\n

list[exp.Subquery]: derived tables

\n
\n"}, "sqlglot.optimizer.scope.Scope.udtfs": {"fullname": "sqlglot.optimizer.scope.Scope.udtfs", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.udtfs", "kind": "variable", "doc": "

List of \"User Defined Tabular Functions\" in this scope.

\n\n
Returns:
\n\n
\n

list[exp.UDTF]: UDTFs

\n
\n"}, "sqlglot.optimizer.scope.Scope.subqueries": {"fullname": "sqlglot.optimizer.scope.Scope.subqueries", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.subqueries", "kind": "variable", "doc": "

List of subqueries in this scope.

\n\n
For example:
\n\n
\n

SELECT * FROM x WHERE a IN (SELECT ...) <- that's a subquery

\n
\n\n
Returns:
\n\n
\n

list[exp.Subqueryable]: subqueries

\n
\n"}, "sqlglot.optimizer.scope.Scope.columns": {"fullname": "sqlglot.optimizer.scope.Scope.columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.columns", "kind": "variable", "doc": "

List of columns in this scope.

\n\n
Returns:
\n\n
\n

list[exp.Column]: Column instances in this scope, plus any\n Columns that reference this scope from correlated subqueries.

\n
\n"}, "sqlglot.optimizer.scope.Scope.selected_sources": {"fullname": "sqlglot.optimizer.scope.Scope.selected_sources", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.selected_sources", "kind": "variable", "doc": "

Mapping of nodes and sources that are actually selected from in this scope.

\n\n

That is, all tables in a schema are selectable at any point. But a\ntable only becomes a selected source if it's included in a FROM or JOIN clause.

\n\n
Returns:
\n\n
\n

dict[str, (exp.Table|exp.Select, exp.Table|Scope)]: selected sources and nodes

\n
\n"}, "sqlglot.optimizer.scope.Scope.cte_sources": {"fullname": "sqlglot.optimizer.scope.Scope.cte_sources", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.cte_sources", "kind": "variable", "doc": "

Sources that are CTEs.

\n\n
Returns:
\n\n
\n

dict[str, Scope]: Mapping of source alias to Scope

\n
\n"}, "sqlglot.optimizer.scope.Scope.selects": {"fullname": "sqlglot.optimizer.scope.Scope.selects", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.selects", "kind": "variable", "doc": "

Select expressions of this scope.

\n\n

For example, for the following expression:
    SELECT 1 as a, 2 as b FROM x

\n\n

The outputs are the \"1 as a\" and \"2 as b\" expressions.

\n\n
Returns:
\n\n
\n

list[exp.Expression]: expressions

\n
\n"}, "sqlglot.optimizer.scope.Scope.external_columns": {"fullname": "sqlglot.optimizer.scope.Scope.external_columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.external_columns", "kind": "variable", "doc": "

Columns that appear to reference sources in outer scopes.

\n\n
Returns:
\n\n
\n

list[exp.Column]: Column instances that don't reference\n sources in the current scope.

\n
\n"}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"fullname": "sqlglot.optimizer.scope.Scope.unqualified_columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.unqualified_columns", "kind": "variable", "doc": "

Unqualified columns in the current scope.

\n\n
Returns:
\n\n
\n

list[exp.Column]: Unqualified columns

\n
\n"}, "sqlglot.optimizer.scope.Scope.join_hints": {"fullname": "sqlglot.optimizer.scope.Scope.join_hints", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.join_hints", "kind": "variable", "doc": "

Hints that exist in the scope that reference tables

\n\n
Returns:
\n\n
\n

list[exp.JoinHint]: Join hints that are referenced within the scope

\n
\n"}, "sqlglot.optimizer.scope.Scope.source_columns": {"fullname": "sqlglot.optimizer.scope.Scope.source_columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.source_columns", "kind": "function", "doc": "

Get all columns in the current scope for a particular source.

\n\n
Arguments:
\n\n
    \n
  • source_name (str): Name of the source
  • \n
\n\n
Returns:
\n\n
\n

list[exp.Column]: Column instances that reference source_name

\n
\n", "signature": "(self, source_name):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.is_subquery": {"fullname": "sqlglot.optimizer.scope.Scope.is_subquery", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_subquery", "kind": "variable", "doc": "

Determine if this scope is a subquery

\n"}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"fullname": "sqlglot.optimizer.scope.Scope.is_derived_table", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_derived_table", "kind": "variable", "doc": "

Determine if this scope is a derived table

\n"}, "sqlglot.optimizer.scope.Scope.is_union": {"fullname": "sqlglot.optimizer.scope.Scope.is_union", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_union", "kind": "variable", "doc": "

Determine if this scope is a union

\n"}, "sqlglot.optimizer.scope.Scope.is_cte": {"fullname": "sqlglot.optimizer.scope.Scope.is_cte", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_cte", "kind": "variable", "doc": "

Determine if this scope is a common table expression

\n"}, "sqlglot.optimizer.scope.Scope.is_root": {"fullname": "sqlglot.optimizer.scope.Scope.is_root", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_root", "kind": "variable", "doc": "

Determine if this is the root scope

\n"}, "sqlglot.optimizer.scope.Scope.is_udtf": {"fullname": "sqlglot.optimizer.scope.Scope.is_udtf", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_udtf", "kind": "variable", "doc": "

Determine if this scope is a UDTF (User Defined Table Function)

\n"}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"fullname": "sqlglot.optimizer.scope.Scope.is_correlated_subquery", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_correlated_subquery", "kind": "variable", "doc": "

Determine if this scope is a correlated subquery

\n"}, "sqlglot.optimizer.scope.Scope.rename_source": {"fullname": "sqlglot.optimizer.scope.Scope.rename_source", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.rename_source", "kind": "function", "doc": "

Rename a source in this scope

\n", "signature": "(self, old_name, new_name):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.add_source": {"fullname": "sqlglot.optimizer.scope.Scope.add_source", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.add_source", "kind": "function", "doc": "

Add a source to this scope

\n", "signature": "(self, name, source):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.remove_source": {"fullname": "sqlglot.optimizer.scope.Scope.remove_source", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.remove_source", "kind": "function", "doc": "

Remove a source from this scope

\n", "signature": "(self, name):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.traverse": {"fullname": "sqlglot.optimizer.scope.Scope.traverse", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.traverse", "kind": "function", "doc": "

Traverse the scope tree from this node.

\n\n
Yields:
\n\n
\n

Scope: scope instances in depth-first-search post-order

\n
\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.ref_count": {"fullname": "sqlglot.optimizer.scope.Scope.ref_count", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.ref_count", "kind": "function", "doc": "

Count the number of times each scope in this tree is referenced.

\n\n
Returns:
\n\n
\n

dict[int, int]: Mapping of Scope instance ID to reference count

\n
\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.optimizer.scope.traverse_scope": {"fullname": "sqlglot.optimizer.scope.traverse_scope", "modulename": "sqlglot.optimizer.scope", "qualname": "traverse_scope", "kind": "function", "doc": "

Traverse an expression by its \"scopes\".

\n\n

\"Scope\" represents the current context of a Select statement.

\n\n

This is helpful for optimizing queries, where we need more information than\nthe expression tree itself. For example, we might care about the source\nnames within a subquery. Returns a list because a generator could result in\nincomplete properties which is confusing.

\n\n
Examples:
    >>> import sqlglot
    >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y")
    >>> scopes = traverse_scope(expression)
    >>> scopes[0].expression.sql(), list(scopes[0].sources)
    ('SELECT a FROM x', ['x'])
    >>> scopes[1].expression.sql(), list(scopes[1].sources)
    ('SELECT a FROM (SELECT a FROM x) AS y', ['y'])

Arguments:
\n\n
    \n
  • expression (exp.Expression): expression to traverse
  • \n
\n\n
Returns:
\n\n
\n

list[Scope]: scope instances

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> List[sqlglot.optimizer.scope.Scope]:", "funcdef": "def"}, "sqlglot.optimizer.scope.build_scope": {"fullname": "sqlglot.optimizer.scope.build_scope", "modulename": "sqlglot.optimizer.scope", "qualname": "build_scope", "kind": "function", "doc": "

Build a scope tree.

\n\n
Arguments:
\n\n
    \n
  • expression (exp.Expression): expression to build the scope tree for
  • \n
\n\n
Returns:
\n\n
\n

Scope: root scope

\n
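A small sketch combining build_scope with Scope.traverse; the ordering mirrors the depth-first post-order documented for traverse:

    >>> import sqlglot
    >>> from sqlglot.optimizer.scope import build_scope
    >>> root = build_scope(sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y"))
    >>> [scope.expression.sql() for scope in root.traverse()]
    ['SELECT a FROM x', 'SELECT a FROM (SELECT a FROM x) AS y']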
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> Optional[sqlglot.optimizer.scope.Scope]:", "funcdef": "def"}, "sqlglot.optimizer.scope.walk_in_scope": {"fullname": "sqlglot.optimizer.scope.walk_in_scope", "modulename": "sqlglot.optimizer.scope", "qualname": "walk_in_scope", "kind": "function", "doc": "

Returns a generator object which visits all nodes in the syntax tree, stopping at\nnodes that start child scopes.

\n\n
Arguments:
\n\n
    \n
  • expression (exp.Expression):
  • \n
  • bfs (bool): if set to True the BFS traversal order will be applied,\notherwise the DFS traversal will be used instead.
  • \n
\n\n
Yields:
\n\n
\n

tuple[exp.Expression, Optional[exp.Expression], str]: node, parent, arg key

\n
\n", "signature": "(expression, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify": {"fullname": "sqlglot.optimizer.simplify", "modulename": "sqlglot.optimizer.simplify", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.simplify.simplify": {"fullname": "sqlglot.optimizer.simplify.simplify", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify", "kind": "function", "doc": "

Rewrite sqlglot AST to simplify expressions.

\n\n
Example:
    >>> import sqlglot
    >>> expression = sqlglot.parse_one("TRUE AND TRUE")
    >>> simplify(expression).sql()
    'TRUE'

Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to simplify
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: simplified expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.rewrite_between": {"fullname": "sqlglot.optimizer.simplify.rewrite_between", "modulename": "sqlglot.optimizer.simplify", "qualname": "rewrite_between", "kind": "function", "doc": "

Rewrite x between y and z to x >= y AND x <= z.

\n\n

This is done because comparison simplification is only done on lt/lte/gt/gte.
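A brief sketch (output is approximate):

    >>> import sqlglot
    >>> from sqlglot.optimizer.simplify import rewrite_between
    >>> rewrite_between(sqlglot.parse_one("x BETWEEN 1 AND 10")).sql()  # roughly 'x >= 1 AND x <= 10'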

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_not": {"fullname": "sqlglot.optimizer.simplify.simplify_not", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_not", "kind": "function", "doc": "

De Morgan's Law:
NOT (x OR y) -> NOT x AND NOT y
NOT (x AND y) -> NOT x OR NOT y
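A brief sketch showing the rewrite through the top-level simplify entry point (exact output formatting assumed):

    >>> import sqlglot
    >>> from sqlglot.optimizer.simplify import simplify
    >>> simplify(sqlglot.parse_one("NOT (a OR b)")).sql()  # roughly 'NOT a AND NOT b'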

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.flatten": {"fullname": "sqlglot.optimizer.simplify.flatten", "modulename": "sqlglot.optimizer.simplify", "qualname": "flatten", "kind": "function", "doc": "

A AND (B AND C) -> A AND B AND C
A OR (B OR C) -> A OR B OR C

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_connectors": {"fullname": "sqlglot.optimizer.simplify.simplify_connectors", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_connectors", "kind": "function", "doc": "

\n", "signature": "(expression, root=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify.remove_compliments": {"fullname": "sqlglot.optimizer.simplify.remove_compliments", "modulename": "sqlglot.optimizer.simplify", "qualname": "remove_compliments", "kind": "function", "doc": "

Removing complements.

A AND NOT A -> FALSE
A OR NOT A -> TRUE

\n", "signature": "(expression, root=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify.uniq_sort": {"fullname": "sqlglot.optimizer.simplify.uniq_sort", "modulename": "sqlglot.optimizer.simplify", "qualname": "uniq_sort", "kind": "function", "doc": "

Uniq and sort a connector.

\n\n

C AND A AND B AND B -> A AND B AND C

\n", "signature": "(expression, generate, root=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"fullname": "sqlglot.optimizer.simplify.absorb_and_eliminate", "modulename": "sqlglot.optimizer.simplify", "qualname": "absorb_and_eliminate", "kind": "function", "doc": "

absorption:
    A AND (A OR B) -> A
    A OR (A AND B) -> A
    A AND (NOT A OR B) -> A AND B
    A OR (NOT A AND B) -> A OR B
elimination:
    (A AND B) OR (A AND NOT B) -> A
    (A OR B) AND (A OR NOT B) -> A

\n", "signature": "(expression, root=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_literals": {"fullname": "sqlglot.optimizer.simplify.simplify_literals", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_literals", "kind": "function", "doc": "

\n", "signature": "(expression, root=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_parens": {"fullname": "sqlglot.optimizer.simplify.simplify_parens", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_parens", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.remove_where_true": {"fullname": "sqlglot.optimizer.simplify.remove_where_true", "modulename": "sqlglot.optimizer.simplify", "qualname": "remove_where_true", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.always_true": {"fullname": "sqlglot.optimizer.simplify.always_true", "modulename": "sqlglot.optimizer.simplify", "qualname": "always_true", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.is_complement": {"fullname": "sqlglot.optimizer.simplify.is_complement", "modulename": "sqlglot.optimizer.simplify", "qualname": "is_complement", "kind": "function", "doc": "

\n", "signature": "(a, b):", "funcdef": "def"}, "sqlglot.optimizer.simplify.is_false": {"fullname": "sqlglot.optimizer.simplify.is_false", "modulename": "sqlglot.optimizer.simplify", "qualname": "is_false", "kind": "function", "doc": "

\n", "signature": "(a: sqlglot.expressions.Expression) -> bool:", "funcdef": "def"}, "sqlglot.optimizer.simplify.is_null": {"fullname": "sqlglot.optimizer.simplify.is_null", "modulename": "sqlglot.optimizer.simplify", "qualname": "is_null", "kind": "function", "doc": "

\n", "signature": "(a: sqlglot.expressions.Expression) -> bool:", "funcdef": "def"}, "sqlglot.optimizer.simplify.eval_boolean": {"fullname": "sqlglot.optimizer.simplify.eval_boolean", "modulename": "sqlglot.optimizer.simplify", "qualname": "eval_boolean", "kind": "function", "doc": "

\n", "signature": "(expression, a, b):", "funcdef": "def"}, "sqlglot.optimizer.simplify.extract_date": {"fullname": "sqlglot.optimizer.simplify.extract_date", "modulename": "sqlglot.optimizer.simplify", "qualname": "extract_date", "kind": "function", "doc": "

\n", "signature": "(cast):", "funcdef": "def"}, "sqlglot.optimizer.simplify.extract_interval": {"fullname": "sqlglot.optimizer.simplify.extract_interval", "modulename": "sqlglot.optimizer.simplify", "qualname": "extract_interval", "kind": "function", "doc": "

\n", "signature": "(interval):", "funcdef": "def"}, "sqlglot.optimizer.simplify.date_literal": {"fullname": "sqlglot.optimizer.simplify.date_literal", "modulename": "sqlglot.optimizer.simplify", "qualname": "date_literal", "kind": "function", "doc": "

\n", "signature": "(date):", "funcdef": "def"}, "sqlglot.optimizer.simplify.boolean_literal": {"fullname": "sqlglot.optimizer.simplify.boolean_literal", "modulename": "sqlglot.optimizer.simplify", "qualname": "boolean_literal", "kind": "function", "doc": "

\n", "signature": "(condition):", "funcdef": "def"}, "sqlglot.optimizer.unnest_subqueries": {"fullname": "sqlglot.optimizer.unnest_subqueries", "modulename": "sqlglot.optimizer.unnest_subqueries", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"fullname": "sqlglot.optimizer.unnest_subqueries.unnest_subqueries", "modulename": "sqlglot.optimizer.unnest_subqueries", "qualname": "unnest_subqueries", "kind": "function", "doc": "

Rewrite sqlglot AST to convert some predicates with subqueries into joins.

\n\n

Convert scalar subqueries into cross joins.
Convert correlated or vectorized subqueries into a group by so it is not a many-to-many left join.

\n\n
Example:
    >>> import sqlglot
    >>> expression = sqlglot.parse_one("SELECT * FROM x AS x WHERE (SELECT y.a AS a FROM y AS y WHERE x.a = y.a) = 1 ")
    >>> unnest_subqueries(expression).sql()
    'SELECT * FROM x AS x LEFT JOIN (SELECT y.a AS a FROM y AS y WHERE TRUE GROUP BY y.a) AS _u_0 ON x.a = _u_0.a WHERE _u_0.a = 1'

Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to unnest
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: unnested expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.unnest_subqueries.unnest": {"fullname": "sqlglot.optimizer.unnest_subqueries.unnest", "modulename": "sqlglot.optimizer.unnest_subqueries", "qualname": "unnest", "kind": "function", "doc": "

\n", "signature": "(select, parent_select, next_alias_name):", "funcdef": "def"}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"fullname": "sqlglot.optimizer.unnest_subqueries.decorrelate", "modulename": "sqlglot.optimizer.unnest_subqueries", "qualname": "decorrelate", "kind": "function", "doc": "

\n", "signature": "(select, parent_select, external_columns, next_alias_name):", "funcdef": "def"}, "sqlglot.parser": {"fullname": "sqlglot.parser", "modulename": "sqlglot.parser", "kind": "module", "doc": "

\n"}, "sqlglot.parser.parse_var_map": {"fullname": "sqlglot.parser.parse_var_map", "modulename": "sqlglot.parser", "qualname": "parse_var_map", "kind": "function", "doc": "

\n", "signature": "(args: List) -> sqlglot.expressions.StarMap | sqlglot.expressions.VarMap:", "funcdef": "def"}, "sqlglot.parser.parse_like": {"fullname": "sqlglot.parser.parse_like", "modulename": "sqlglot.parser", "qualname": "parse_like", "kind": "function", "doc": "

\n", "signature": "(args: List) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.parser.binary_range_parser": {"fullname": "sqlglot.parser.binary_range_parser", "modulename": "sqlglot.parser", "qualname": "binary_range_parser", "kind": "function", "doc": "

\n", "signature": "(\texpr_type: Type[sqlglot.expressions.Expression]) -> Callable[[sqlglot.parser.Parser, Optional[sqlglot.expressions.Expression]], Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parser.Parser": {"fullname": "sqlglot.parser.Parser", "modulename": "sqlglot.parser", "qualname": "Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
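A minimal usage sketch pairing the Parser with sqlglot.tokens.Tokenizer (sqlglot.parse does essentially this internally):

    >>> from sqlglot.tokens import Tokenizer
    >>> from sqlglot.parser import Parser
    >>> tokens = Tokenizer().tokenize("SELECT a FROM x")
    >>> Parser().parse(tokens)[0].sql()
    'SELECT a FROM x'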
\n"}, "sqlglot.parser.Parser.__init__": {"fullname": "sqlglot.parser.Parser.__init__", "modulename": "sqlglot.parser", "qualname": "Parser.__init__", "kind": "function", "doc": "

\n", "signature": "(\terror_level: Optional[sqlglot.errors.ErrorLevel] = None,\terror_message_context: int = 100,\tindex_offset: int = 0,\tunnest_column_only: bool = False,\talias_post_tablesample: bool = False,\tmax_errors: int = 3,\tnull_ordering: Optional[str] = None)"}, "sqlglot.parser.Parser.reset": {"fullname": "sqlglot.parser.Parser.reset", "modulename": "sqlglot.parser", "qualname": "Parser.reset", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.parser.Parser.parse": {"fullname": "sqlglot.parser.Parser.parse", "modulename": "sqlglot.parser", "qualname": "Parser.parse", "kind": "function", "doc": "

Parses a list of tokens and returns a list of syntax trees, one tree\nper parsed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • raw_tokens: the list of tokens.
  • \n
  • sql: the original SQL string, used to produce helpful debug messages.
  • \n
\n\n
Returns:
\n\n
\n

The list of syntax trees.

\n
\n", "signature": "(\tself,\traw_tokens: List[sqlglot.tokens.Token],\tsql: Optional[str] = None) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parser.Parser.parse_into": {"fullname": "sqlglot.parser.Parser.parse_into", "modulename": "sqlglot.parser", "qualname": "Parser.parse_into", "kind": "function", "doc": "

Parses a list of tokens into a given Expression type. If a collection of Expression\ntypes is given instead, this method will try to parse the token list into each one\nof them, stopping at the first for which the parsing succeeds.

\n\n
Arguments:
\n\n
    \n
  • expression_types: the expression type(s) to try and parse the token list into.
  • \n
  • raw_tokens: the list of tokens.
  • \n
  • sql: the original SQL string, used to produce helpful debug messages.
  • \n
\n\n
Returns:
\n\n
\n

The target Expression.

\n
\n", "signature": "(\tself,\texpression_types: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]]],\traw_tokens: List[sqlglot.tokens.Token],\tsql: Optional[str] = None) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parser.Parser.check_errors": {"fullname": "sqlglot.parser.Parser.check_errors", "modulename": "sqlglot.parser", "qualname": "Parser.check_errors", "kind": "function", "doc": "

Logs or raises any found errors, depending on the chosen error level setting.

\n", "signature": "(self) -> None:", "funcdef": "def"}, "sqlglot.parser.Parser.raise_error": {"fullname": "sqlglot.parser.Parser.raise_error", "modulename": "sqlglot.parser", "qualname": "Parser.raise_error", "kind": "function", "doc": "

Appends an error in the list of recorded errors or raises it, depending on the chosen\nerror level setting.

\n", "signature": "(self, message: str, token: Optional[sqlglot.tokens.Token] = None) -> None:", "funcdef": "def"}, "sqlglot.parser.Parser.expression": {"fullname": "sqlglot.parser.Parser.expression", "modulename": "sqlglot.parser", "qualname": "Parser.expression", "kind": "function", "doc": "

Creates a new, validated Expression.

\n\n
Arguments:
\n\n
    \n
  • exp_class: the expression class to instantiate.
  • \n
  • comments: an optional list of comments to attach to the expression.
  • \n
  • kwargs: the arguments to set for the expression along with their respective values.
  • \n
\n\n
Returns:
\n\n
\n

The target expression.

\n
\n", "signature": "(\tself,\texp_class: Type[~E],\tcomments: Optional[List[str]] = None,\t**kwargs) -> ~E:", "funcdef": "def"}, "sqlglot.parser.Parser.validate_expression": {"fullname": "sqlglot.parser.Parser.validate_expression", "modulename": "sqlglot.parser", "qualname": "Parser.validate_expression", "kind": "function", "doc": "

Validates an already instantiated expression, making sure that all its mandatory arguments\nare set.

\n\n
Arguments:
\n\n
    \n
  • expression: the expression to validate.
  • \n
  • args: an optional list of items that was used to instantiate the expression, if it's a Func.
  • \n
\n", "signature": "(\tself,\texpression: sqlglot.expressions.Expression,\targs: Optional[List] = None) -> None:", "funcdef": "def"}, "sqlglot.planner": {"fullname": "sqlglot.planner", "modulename": "sqlglot.planner", "kind": "module", "doc": "

\n"}, "sqlglot.planner.Plan": {"fullname": "sqlglot.planner.Plan", "modulename": "sqlglot.planner", "qualname": "Plan", "kind": "class", "doc": "

\n"}, "sqlglot.planner.Plan.__init__": {"fullname": "sqlglot.planner.Plan.__init__", "modulename": "sqlglot.planner", "qualname": "Plan.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.planner.Step": {"fullname": "sqlglot.planner.Step", "modulename": "sqlglot.planner", "qualname": "Step", "kind": "class", "doc": "

\n"}, "sqlglot.planner.Step.from_expression": {"fullname": "sqlglot.planner.Step.from_expression", "modulename": "sqlglot.planner", "qualname": "Step.from_expression", "kind": "function", "doc": "

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.\nNote: the expression's tables and subqueries must be aliased for this method to work. For\nexample, given the following expression:

\n\n

SELECT
  x.a,
  SUM(x.b)
FROM x AS x
JOIN y AS y
  ON x.a = y.a
GROUP BY x.a

\n\n

the following DAG is produced (the expression IDs might differ per execution):

\n\n
  - Aggregate: x (4347984624)
      Context:
        Aggregations:
          - SUM(x.b)
        Group:
          - x.a
      Projections:
        - x.a
        - \"x\".\"\"
      Dependencies:
      - Join: x (4347985296)
          Context:
            y:
            On: x.a = y.a
          Projections:
          Dependencies:
          - Scan: x (4347983136)
              Context:
                Source: x AS x
              Projections:
          - Scan: y (4343416624)
              Context:
                Source: y AS y
              Projections:
\n\n
Arguments:
\n\n
    \n
  • expression: the expression to build the DAG from.
  • \n
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • \n
\n\n
Returns:
\n\n
\n

A Step DAG corresponding to expression.

\n
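A brief sketch of building and rendering such a DAG; it assumes the Plan wrapper in this module exposes the root Step as plan.root, and the printed IDs will differ per run:

    >>> import sqlglot
    >>> from sqlglot.planner import Plan
    >>> sql = "SELECT x.a, SUM(x.b) FROM x AS x JOIN y AS y ON x.a = y.a GROUP BY x.a"
    >>> plan = Plan(sqlglot.parse_one(sql))
    >>> print(plan.root.to_s())  # renders a tree like the one above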
\n", "signature": "(\tcls,\texpression: sqlglot.expressions.Expression,\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.planner.Step.add_dependency": {"fullname": "sqlglot.planner.Step.add_dependency", "modulename": "sqlglot.planner", "qualname": "Step.add_dependency", "kind": "function", "doc": "

\n", "signature": "(self, dependency: sqlglot.planner.Step) -> None:", "funcdef": "def"}, "sqlglot.planner.Step.to_s": {"fullname": "sqlglot.planner.Step.to_s", "modulename": "sqlglot.planner", "qualname": "Step.to_s", "kind": "function", "doc": "

\n", "signature": "(self, level: int = 0) -> str:", "funcdef": "def"}, "sqlglot.planner.Scan": {"fullname": "sqlglot.planner.Scan", "modulename": "sqlglot.planner", "qualname": "Scan", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.Scan.from_expression": {"fullname": "sqlglot.planner.Scan.from_expression", "modulename": "sqlglot.planner", "qualname": "Scan.from_expression", "kind": "function", "doc": "

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.\nNote: the expression's tables and subqueries must be aliased for this method to work. For\nexample, given the following expression:

\n\n

SELECT
  x.a,
  SUM(x.b)
FROM x AS x
JOIN y AS y
  ON x.a = y.a
GROUP BY x.a

\n\n

the following DAG is produced (the expression IDs might differ per execution):

\n\n
  - Aggregate: x (4347984624)
      Context:
        Aggregations:
          - SUM(x.b)
        Group:
          - x.a
      Projections:
        - x.a
        - \"x\".\"\"
      Dependencies:
      - Join: x (4347985296)
          Context:
            y:
            On: x.a = y.a
          Projections:
          Dependencies:
          - Scan: x (4347983136)
              Context:
                Source: x AS x
              Projections:
          - Scan: y (4343416624)
              Context:
                Source: y AS y
              Projections:
\n\n
Arguments:
\n\n
    \n
  • expression: the expression to build the DAG from.
  • \n
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • \n
\n\n
Returns:
\n\n
\n

A Step DAG corresponding to expression.

\n
\n", "signature": "(\tcls,\texpression: sqlglot.expressions.Expression,\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.planner.Join": {"fullname": "sqlglot.planner.Join", "modulename": "sqlglot.planner", "qualname": "Join", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.Join.from_joins": {"fullname": "sqlglot.planner.Join.from_joins", "modulename": "sqlglot.planner", "qualname": "Join.from_joins", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tjoins: Iterable[sqlglot.expressions.Join],\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.planner.Aggregate": {"fullname": "sqlglot.planner.Aggregate", "modulename": "sqlglot.planner", "qualname": "Aggregate", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.Sort": {"fullname": "sqlglot.planner.Sort", "modulename": "sqlglot.planner", "qualname": "Sort", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.SetOperation": {"fullname": "sqlglot.planner.SetOperation", "modulename": "sqlglot.planner", "qualname": "SetOperation", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.SetOperation.__init__": {"fullname": "sqlglot.planner.SetOperation.__init__", "modulename": "sqlglot.planner", "qualname": "SetOperation.__init__", "kind": "function", "doc": "

\n", "signature": "(\top: Type[sqlglot.expressions.Expression],\tleft: str | None,\tright: str | None,\tdistinct: bool = False)"}, "sqlglot.planner.SetOperation.from_expression": {"fullname": "sqlglot.planner.SetOperation.from_expression", "modulename": "sqlglot.planner", "qualname": "SetOperation.from_expression", "kind": "function", "doc": "

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.\nNote: the expression's tables and subqueries must be aliased for this method to work. For\nexample, given the following expression:

\n\n

SELECT
  x.a,
  SUM(x.b)
FROM x AS x
JOIN y AS y
  ON x.a = y.a
GROUP BY x.a

\n\n

the following DAG is produced (the expression IDs might differ per execution):

\n\n
  - Aggregate: x (4347984624)
      Context:
        Aggregations:
          - SUM(x.b)
        Group:
          - x.a
      Projections:
        - x.a
        - \"x\".\"\"
      Dependencies:
      - Join: x (4347985296)
          Context:
            y:
            On: x.a = y.a
          Projections:
          Dependencies:
          - Scan: x (4347983136)
              Context:
                Source: x AS x
              Projections:
          - Scan: y (4343416624)
              Context:
                Source: y AS y
              Projections:
\n\n
Arguments:
\n\n
    \n
  • expression: the expression to build the DAG from.
  • \n
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • \n
\n\n
Returns:
\n\n
\n

A Step DAG corresponding to expression.

\n
\n", "signature": "(\tcls,\texpression: sqlglot.expressions.Expression,\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.schema.Schema": {"fullname": "sqlglot.schema.Schema", "modulename": "sqlglot.schema", "qualname": "Schema", "kind": "class", "doc": "

Abstract base class for database schemas

\n", "bases": "abc.ABC"}, "sqlglot.schema.Schema.add_table": {"fullname": "sqlglot.schema.Schema.add_table", "modulename": "sqlglot.schema", "qualname": "Schema.add_table", "kind": "function", "doc": "

Register or update a table. Some implementing classes may require column information to also be provided.

\n\n
Arguments:
\n\n
    \n
  • table: the Table expression instance or string representing the table.
  • \n
  • column_mapping: a column mapping that describes the structure of the table.
  • \n
  • dialect: the SQL dialect that will be used to parse table if it's a string.
  • \n
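A small usage sketch with the concrete MappingSchema implementation documented below:

    >>> from sqlglot.schema import MappingSchema
    >>> schema = MappingSchema()
    >>> schema.add_table("db.tbl", {"a": "INT", "b": "TEXT"})
    >>> schema.column_names("db.tbl")
    ['a', 'b']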
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn_mapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> None:", "funcdef": "def"}, "sqlglot.schema.Schema.column_names": {"fullname": "sqlglot.schema.Schema.column_names", "modulename": "sqlglot.schema", "qualname": "Schema.column_names", "kind": "function", "doc": "

Get the column names for a table.

\n\n
Arguments:
\n\n
    \n
  • table: the Table expression instance.
  • \n
  • only_visible: whether to include invisible columns.
  • \n
  • dialect: the SQL dialect that will be used to parse table if it's a string.
  • \n
\n\n
Returns:
\n\n
\n

The list of column names.

\n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tonly_visible: bool = False,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> List[str]:", "funcdef": "def"}, "sqlglot.schema.Schema.get_column_type": {"fullname": "sqlglot.schema.Schema.get_column_type", "modulename": "sqlglot.schema", "qualname": "Schema.get_column_type", "kind": "function", "doc": "

Get the sqlglot.exp.DataType type of a column in the schema.

\n\n
Arguments:
\n\n
    \n
  • table: the source table.
  • \n
  • column: the target column.
  • \n
  • dialect: the SQL dialect that will be used to parse table if it's a string.
  • \n
\n\n
Returns:
\n\n
\n

The resulting column type.

\n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn: sqlglot.expressions.Column,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> sqlglot.expressions.DataType:", "funcdef": "def"}, "sqlglot.schema.Schema.supported_table_args": {"fullname": "sqlglot.schema.Schema.supported_table_args", "modulename": "sqlglot.schema", "qualname": "Schema.supported_table_args", "kind": "variable", "doc": "

Table arguments this schema supports, e.g. (\"this\", \"db\", \"catalog\")

\n", "annotation": ": Tuple[str, ...]"}, "sqlglot.schema.Schema.empty": {"fullname": "sqlglot.schema.Schema.empty", "modulename": "sqlglot.schema", "qualname": "Schema.empty", "kind": "variable", "doc": "

Returns whether or not the schema is empty.

\n", "annotation": ": bool"}, "sqlglot.schema.AbstractMappingSchema": {"fullname": "sqlglot.schema.AbstractMappingSchema", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema", "kind": "class", "doc": "

Abstract base class for generic types.

A generic type is typically declared by inheriting from
this class parameterized with one or more type variables.
For example, a generic mapping type might be defined as::

    class Mapping(Generic[KT, VT]):
        def __getitem__(self, key: KT) -> VT:
            ...
        # Etc.

This class can then be used as follows::

    def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
        try:
            return mapping[key]
        except KeyError:
            return default
\n", "bases": "typing.Generic[~T]"}, "sqlglot.schema.AbstractMappingSchema.__init__": {"fullname": "sqlglot.schema.AbstractMappingSchema.__init__", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.__init__", "kind": "function", "doc": "

\n", "signature": "(mapping: Optional[Dict] = None)"}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"fullname": "sqlglot.schema.AbstractMappingSchema.table_parts", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.table_parts", "kind": "function", "doc": "

\n", "signature": "(self, table: sqlglot.expressions.Table) -> List[str]:", "funcdef": "def"}, "sqlglot.schema.AbstractMappingSchema.find": {"fullname": "sqlglot.schema.AbstractMappingSchema.find", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.find", "kind": "function", "doc": "

\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table,\ttrie: Optional[Dict] = None,\traise_on_missing: bool = True) -> Optional[~T]:", "funcdef": "def"}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"fullname": "sqlglot.schema.AbstractMappingSchema.nested_get", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.nested_get", "kind": "function", "doc": "

\n", "signature": "(\tself,\tparts: Sequence[str],\td: Optional[Dict] = None,\traise_on_missing=True) -> Optional[Any]:", "funcdef": "def"}, "sqlglot.schema.MappingSchema": {"fullname": "sqlglot.schema.MappingSchema", "modulename": "sqlglot.schema", "qualname": "MappingSchema", "kind": "class", "doc": "

Schema based on a nested mapping.

Arguments:

  • schema: Mapping in one of the following forms (see the construction sketch below):
      1. {table: {col: type}}
      2. {db: {table: {col: type}}}
      3. {catalog: {db: {table: {col: type}}}}
      4. None - Tables will be added later
  • visible: Optional mapping of which columns in the schema are visible. If not provided, all columns
    are assumed to be visible. The nesting should mirror that of the schema:
      1. {table: set(cols)}
      2. {db: {table: set(cols)}}
      3. {catalog: {db: {table: set(cols)}}}
  • dialect: The dialect to be used for custom type mappings & parsing string arguments.
  • normalize: Whether to normalize identifier names according to the given dialect or not.
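
A minimal construction sketch (names and types are illustrative, not from the docs), covering the flat form and the fully nested form with a mirrored visible mapping:

    from sqlglot.schema import MappingSchema

    # Form 1: {table: {col: type}}
    flat = MappingSchema({"users": {"id": "BIGINT", "name": "TEXT"}})

    # Form 3: {catalog: {db: {table: {col: type}}}}, with a visibility mapping
    # whose nesting mirrors the schema (only "id" is visible here).
    nested = MappingSchema(
        schema={"cat": {"db": {"users": {"id": "BIGINT", "ssn": "TEXT"}}}},
        visible={"cat": {"db": {"users": {"id"}}}},
    )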
\n", "bases": "sqlglot.schema.AbstractMappingSchema[typing.Dict[str, str]], Schema"}, "sqlglot.schema.MappingSchema.__init__": {"fullname": "sqlglot.schema.MappingSchema.__init__", "modulename": "sqlglot.schema", "qualname": "MappingSchema.__init__", "kind": "function", "doc": "

\n", "signature": "(\tschema: Optional[Dict] = None,\tvisible: Optional[Dict] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tnormalize: bool = True)"}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"fullname": "sqlglot.schema.MappingSchema.from_mapping_schema", "modulename": "sqlglot.schema", "qualname": "MappingSchema.from_mapping_schema", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tmapping_schema: sqlglot.schema.MappingSchema) -> sqlglot.schema.MappingSchema:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.copy": {"fullname": "sqlglot.schema.MappingSchema.copy", "modulename": "sqlglot.schema", "qualname": "MappingSchema.copy", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> sqlglot.schema.MappingSchema:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.add_table": {"fullname": "sqlglot.schema.MappingSchema.add_table", "modulename": "sqlglot.schema", "qualname": "MappingSchema.add_table", "kind": "function", "doc": "

Register or update a table. Updates are only performed if a new column mapping is provided.

Arguments:

  • table: the Table expression instance or string representing the table.
  • column_mapping: a column mapping that describes the structure of the table.
  • dialect: the SQL dialect that will be used to parse table if it's a string.
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn_mapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> None:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.column_names": {"fullname": "sqlglot.schema.MappingSchema.column_names", "modulename": "sqlglot.schema", "qualname": "MappingSchema.column_names", "kind": "function", "doc": "

Get the column names for a table.

Arguments:

  • table: the Table expression instance or string representing the table.
  • only_visible: whether to return only the visible columns (i.e. exclude columns marked invisible).
  • dialect: the SQL dialect that will be used to parse table if it's a string.

Returns:

    The list of column names.
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tonly_visible: bool = False,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> List[str]:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.get_column_type": {"fullname": "sqlglot.schema.MappingSchema.get_column_type", "modulename": "sqlglot.schema", "qualname": "MappingSchema.get_column_type", "kind": "function", "doc": "

Get the sqlglot.exp.DataType type of a column in the schema.

Arguments:

  • table: the source table.
  • column: the target column.
  • dialect: the SQL dialect that will be used to parse table if it's a string.

Returns:

    The resulting column type.
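
A small sketch combining column_names and get_column_type (the schema contents are assumptions):

    from sqlglot import exp
    from sqlglot.schema import MappingSchema

    schema = MappingSchema({"users": {"id": "BIGINT", "name": "TEXT"}})

    schema.column_names("users")                       # ['id', 'name']
    schema.get_column_type("users", exp.column("id"))  # a DataType for BIGINT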
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn: sqlglot.expressions.Column,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> sqlglot.expressions.DataType:", "funcdef": "def"}, "sqlglot.schema.ensure_schema": {"fullname": "sqlglot.schema.ensure_schema", "modulename": "sqlglot.schema", "qualname": "ensure_schema", "kind": "function", "doc": "

\n", "signature": "(\tschema: Union[sqlglot.schema.Schema, Dict, NoneType],\t**kwargs: Any) -> sqlglot.schema.Schema:", "funcdef": "def"}, "sqlglot.schema.ensure_column_mapping": {"fullname": "sqlglot.schema.ensure_column_mapping", "modulename": "sqlglot.schema", "qualname": "ensure_column_mapping", "kind": "function", "doc": "

\n", "signature": "(\tmapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType]) -> Dict:", "funcdef": "def"}, "sqlglot.schema.flatten_schema": {"fullname": "sqlglot.schema.flatten_schema", "modulename": "sqlglot.schema", "qualname": "flatten_schema", "kind": "function", "doc": "

\n", "signature": "(\tschema: Dict,\tdepth: int,\tkeys: Optional[List[str]] = None) -> List[List[str]]:", "funcdef": "def"}, "sqlglot.schema.nested_get": {"fullname": "sqlglot.schema.nested_get", "modulename": "sqlglot.schema", "qualname": "nested_get", "kind": "function", "doc": "

Get a value from a nested dictionary.

Arguments:

  • d: the dictionary to search.
  • *path: tuples of (name, key), where:
    key is the key in the dictionary to get.
    name is a string to use in the error if key isn't found.

Returns:

    The value or None if it doesn't exist.
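
A tiny sketch (the data is illustrative): each (name, key) pair descends one level, and name only appears in the error message when key is missing:

    from sqlglot.schema import nested_get

    mapping = {"db": {"users": {"id": "BIGINT"}}}

    nested_get(mapping, ("db", "db"), ("table", "users"))           # {'id': 'BIGINT'}
    nested_get(mapping, ("db", "missing"), raise_on_missing=False)  # None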
\n", "signature": "(\td: Dict,\t*path: Tuple[str, str],\traise_on_missing: bool = True) -> Optional[Any]:", "funcdef": "def"}, "sqlglot.schema.nested_set": {"fullname": "sqlglot.schema.nested_set", "modulename": "sqlglot.schema", "qualname": "nested_set", "kind": "function", "doc": "

In-place set a value for a nested dictionary.

Example:

    >>> nested_set({}, ["top_key", "second_key"], "value")
    {'top_key': {'second_key': 'value'}}

    >>> nested_set({"top_key": {"third_key": "third_value"}}, ["top_key", "second_key"], "value")
    {'top_key': {'third_key': 'third_value', 'second_key': 'value'}}

Arguments:

  • d: dictionary to update.
  • keys: the keys that make up the path to value.
  • value: the value to set in the dictionary for the given key path.

Returns:

    The (possibly) updated dictionary.
\n", "signature": "(d: Dict, keys: Sequence[str], value: Any) -> Dict:", "funcdef": "def"}, "sqlglot.serde": {"fullname": "sqlglot.serde", "modulename": "sqlglot.serde", "kind": "module", "doc": "

\n"}, "sqlglot.serde.dump": {"fullname": "sqlglot.serde.dump", "modulename": "sqlglot.serde", "qualname": "dump", "kind": "function", "doc": "

Recursively dump an AST into a JSON-serializable dict.

\n", "signature": "(\tnode: Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool]) -> Union[dict, list, str, float, int, bool]:", "funcdef": "def"}, "sqlglot.serde.load": {"fullname": "sqlglot.serde.load", "modulename": "sqlglot.serde", "qualname": "load", "kind": "function", "doc": "

Recursively load a dict (as returned by dump) into an AST.
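
A minimal round-trip sketch (the query and the JSON caching step are assumptions, not part of the API docs):

    import json

    import sqlglot
    from sqlglot.serde import dump, load

    ast = sqlglot.parse_one("SELECT a FROM t WHERE a > 1")
    payload = json.dumps(dump(ast))       # dump() output is JSON-serializable
    restored = load(json.loads(payload))  # back to an Expression

    assert restored.sql() == ast.sql()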

\n", "signature": "(\tobj: Union[dict, list, str, float, int, bool]) -> Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool]:", "funcdef": "def"}, "sqlglot.time": {"fullname": "sqlglot.time", "modulename": "sqlglot.time", "kind": "module", "doc": "

\n"}, "sqlglot.time.format_time": {"fullname": "sqlglot.time.format_time", "modulename": "sqlglot.time", "qualname": "format_time", "kind": "function", "doc": "

Converts a time string given a mapping.

Examples:

    >>> format_time("%Y", {"%Y": "YYYY"})
    'YYYY'

Arguments:

  • string: the time string to convert.
  • mapping: dictionary of time format to target time format.
  • trie: optional trie, can be passed in for performance.

Returns:

    The converted time string.
\n", "signature": "(\tstring: str,\tmapping: Dict[str, str],\ttrie: Optional[Dict] = None) -> Optional[str]:", "funcdef": "def"}, "sqlglot.tokens": {"fullname": "sqlglot.tokens", "modulename": "sqlglot.tokens", "kind": "module", "doc": "

\n"}, "sqlglot.tokens.TokenType": {"fullname": "sqlglot.tokens.TokenType", "modulename": "sqlglot.tokens", "qualname": "TokenType", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.tokens.TokenType.L_PAREN": {"fullname": "sqlglot.tokens.TokenType.L_PAREN", "modulename": "sqlglot.tokens", "qualname": "TokenType.L_PAREN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.L_PAREN: 'L_PAREN'>"}, "sqlglot.tokens.TokenType.R_PAREN": {"fullname": "sqlglot.tokens.TokenType.R_PAREN", "modulename": "sqlglot.tokens", "qualname": "TokenType.R_PAREN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.R_PAREN: 'R_PAREN'>"}, "sqlglot.tokens.TokenType.L_BRACKET": {"fullname": "sqlglot.tokens.TokenType.L_BRACKET", "modulename": "sqlglot.tokens", "qualname": "TokenType.L_BRACKET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.L_BRACKET: 'L_BRACKET'>"}, "sqlglot.tokens.TokenType.R_BRACKET": {"fullname": "sqlglot.tokens.TokenType.R_BRACKET", "modulename": "sqlglot.tokens", "qualname": "TokenType.R_BRACKET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.R_BRACKET: 'R_BRACKET'>"}, "sqlglot.tokens.TokenType.L_BRACE": {"fullname": "sqlglot.tokens.TokenType.L_BRACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.L_BRACE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.L_BRACE: 'L_BRACE'>"}, "sqlglot.tokens.TokenType.R_BRACE": {"fullname": "sqlglot.tokens.TokenType.R_BRACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.R_BRACE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.R_BRACE: 'R_BRACE'>"}, "sqlglot.tokens.TokenType.COMMA": {"fullname": "sqlglot.tokens.TokenType.COMMA", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMA", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COMMA: 'COMMA'>"}, "sqlglot.tokens.TokenType.DOT": {"fullname": "sqlglot.tokens.TokenType.DOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.DOT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DOT: 'DOT'>"}, "sqlglot.tokens.TokenType.DASH": {"fullname": "sqlglot.tokens.TokenType.DASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.DASH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DASH: 'DASH'>"}, "sqlglot.tokens.TokenType.PLUS": {"fullname": "sqlglot.tokens.TokenType.PLUS", "modulename": "sqlglot.tokens", "qualname": "TokenType.PLUS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PLUS: 'PLUS'>"}, "sqlglot.tokens.TokenType.COLON": {"fullname": "sqlglot.tokens.TokenType.COLON", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLON", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COLON: 'COLON'>"}, "sqlglot.tokens.TokenType.DCOLON": {"fullname": "sqlglot.tokens.TokenType.DCOLON", "modulename": "sqlglot.tokens", "qualname": "TokenType.DCOLON", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DCOLON: 'DCOLON'>"}, "sqlglot.tokens.TokenType.SEMICOLON": {"fullname": "sqlglot.tokens.TokenType.SEMICOLON", "modulename": "sqlglot.tokens", "qualname": "TokenType.SEMICOLON", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SEMICOLON: 'SEMICOLON'>"}, "sqlglot.tokens.TokenType.STAR": {"fullname": "sqlglot.tokens.TokenType.STAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.STAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.STAR: 'STAR'>"}, "sqlglot.tokens.TokenType.BACKSLASH": {"fullname": "sqlglot.tokens.TokenType.BACKSLASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.BACKSLASH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BACKSLASH: 'BACKSLASH'>"}, "sqlglot.tokens.TokenType.SLASH": {"fullname": "sqlglot.tokens.TokenType.SLASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.SLASH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SLASH: 'SLASH'>"}, "sqlglot.tokens.TokenType.LT": {"fullname": "sqlglot.tokens.TokenType.LT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LT: 'LT'>"}, "sqlglot.tokens.TokenType.LTE": {"fullname": "sqlglot.tokens.TokenType.LTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.LTE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LTE: 'LTE'>"}, "sqlglot.tokens.TokenType.GT": {"fullname": "sqlglot.tokens.TokenType.GT", "modulename": "sqlglot.tokens", "qualname": "TokenType.GT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GT: 'GT'>"}, "sqlglot.tokens.TokenType.GTE": {"fullname": "sqlglot.tokens.TokenType.GTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.GTE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GTE: 'GTE'>"}, "sqlglot.tokens.TokenType.NOT": {"fullname": "sqlglot.tokens.TokenType.NOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.NOT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NOT: 'NOT'>"}, "sqlglot.tokens.TokenType.EQ": {"fullname": "sqlglot.tokens.TokenType.EQ", "modulename": "sqlglot.tokens", "qualname": "TokenType.EQ", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.EQ: 'EQ'>"}, "sqlglot.tokens.TokenType.NEQ": {"fullname": "sqlglot.tokens.TokenType.NEQ", "modulename": "sqlglot.tokens", "qualname": "TokenType.NEQ", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NEQ: 'NEQ'>"}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"fullname": "sqlglot.tokens.TokenType.NULLSAFE_EQ", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULLSAFE_EQ", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>"}, "sqlglot.tokens.TokenType.AND": {"fullname": "sqlglot.tokens.TokenType.AND", "modulename": "sqlglot.tokens", "qualname": "TokenType.AND", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.AND: 'AND'>"}, "sqlglot.tokens.TokenType.OR": {"fullname": "sqlglot.tokens.TokenType.OR", "modulename": "sqlglot.tokens", "qualname": "TokenType.OR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OR: 'OR'>"}, "sqlglot.tokens.TokenType.AMP": {"fullname": "sqlglot.tokens.TokenType.AMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.AMP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.AMP: 'AMP'>"}, "sqlglot.tokens.TokenType.DPIPE": {"fullname": "sqlglot.tokens.TokenType.DPIPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DPIPE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DPIPE: 'DPIPE'>"}, "sqlglot.tokens.TokenType.PIPE": {"fullname": "sqlglot.tokens.TokenType.PIPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.PIPE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PIPE: 'PIPE'>"}, "sqlglot.tokens.TokenType.CARET": {"fullname": "sqlglot.tokens.TokenType.CARET", "modulename": "sqlglot.tokens", "qualname": "TokenType.CARET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CARET: 'CARET'>"}, "sqlglot.tokens.TokenType.TILDA": {"fullname": "sqlglot.tokens.TokenType.TILDA", "modulename": "sqlglot.tokens", "qualname": "TokenType.TILDA", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TILDA: 'TILDA'>"}, "sqlglot.tokens.TokenType.ARROW": {"fullname": "sqlglot.tokens.TokenType.ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.ARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ARROW: 'ARROW'>"}, "sqlglot.tokens.TokenType.DARROW": {"fullname": "sqlglot.tokens.TokenType.DARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.DARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DARROW: 'DARROW'>"}, "sqlglot.tokens.TokenType.FARROW": {"fullname": "sqlglot.tokens.TokenType.FARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.FARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FARROW: 'FARROW'>"}, "sqlglot.tokens.TokenType.HASH": {"fullname": "sqlglot.tokens.TokenType.HASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.HASH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HASH: 'HASH'>"}, "sqlglot.tokens.TokenType.HASH_ARROW": {"fullname": "sqlglot.tokens.TokenType.HASH_ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.HASH_ARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HASH_ARROW: 'HASH_ARROW'>"}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"fullname": "sqlglot.tokens.TokenType.DHASH_ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.DHASH_ARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DHASH_ARROW: 'DHASH_ARROW'>"}, "sqlglot.tokens.TokenType.LR_ARROW": {"fullname": "sqlglot.tokens.TokenType.LR_ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.LR_ARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LR_ARROW: 'LR_ARROW'>"}, "sqlglot.tokens.TokenType.LT_AT": {"fullname": "sqlglot.tokens.TokenType.LT_AT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LT_AT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LT_AT: 'LT_AT'>"}, "sqlglot.tokens.TokenType.AT_GT": {"fullname": "sqlglot.tokens.TokenType.AT_GT", "modulename": "sqlglot.tokens", "qualname": "TokenType.AT_GT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.AT_GT: 'AT_GT'>"}, "sqlglot.tokens.TokenType.DOLLAR": {"fullname": "sqlglot.tokens.TokenType.DOLLAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.DOLLAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DOLLAR: 'DOLLAR'>"}, "sqlglot.tokens.TokenType.PARAMETER": {"fullname": "sqlglot.tokens.TokenType.PARAMETER", "modulename": "sqlglot.tokens", "qualname": "TokenType.PARAMETER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PARAMETER: 'PARAMETER'>"}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"fullname": "sqlglot.tokens.TokenType.SESSION_PARAMETER", "modulename": "sqlglot.tokens", "qualname": "TokenType.SESSION_PARAMETER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SESSION_PARAMETER: 'SESSION_PARAMETER'>"}, "sqlglot.tokens.TokenType.DAMP": {"fullname": "sqlglot.tokens.TokenType.DAMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.DAMP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DAMP: 'DAMP'>"}, "sqlglot.tokens.TokenType.BLOCK_START": {"fullname": "sqlglot.tokens.TokenType.BLOCK_START", "modulename": "sqlglot.tokens", "qualname": "TokenType.BLOCK_START", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BLOCK_START: 'BLOCK_START'>"}, "sqlglot.tokens.TokenType.BLOCK_END": {"fullname": "sqlglot.tokens.TokenType.BLOCK_END", "modulename": "sqlglot.tokens", "qualname": "TokenType.BLOCK_END", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BLOCK_END: 'BLOCK_END'>"}, "sqlglot.tokens.TokenType.SPACE": {"fullname": "sqlglot.tokens.TokenType.SPACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.SPACE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SPACE: 'SPACE'>"}, "sqlglot.tokens.TokenType.BREAK": {"fullname": "sqlglot.tokens.TokenType.BREAK", "modulename": "sqlglot.tokens", "qualname": "TokenType.BREAK", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BREAK: 'BREAK'>"}, "sqlglot.tokens.TokenType.STRING": {"fullname": "sqlglot.tokens.TokenType.STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.STRING: 'STRING'>"}, "sqlglot.tokens.TokenType.NUMBER": {"fullname": "sqlglot.tokens.TokenType.NUMBER", "modulename": "sqlglot.tokens", "qualname": "TokenType.NUMBER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NUMBER: 'NUMBER'>"}, "sqlglot.tokens.TokenType.IDENTIFIER": {"fullname": "sqlglot.tokens.TokenType.IDENTIFIER", "modulename": "sqlglot.tokens", "qualname": "TokenType.IDENTIFIER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IDENTIFIER: 'IDENTIFIER'>"}, "sqlglot.tokens.TokenType.DATABASE": {"fullname": "sqlglot.tokens.TokenType.DATABASE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATABASE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATABASE: 'DATABASE'>"}, "sqlglot.tokens.TokenType.COLUMN": {"fullname": "sqlglot.tokens.TokenType.COLUMN", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLUMN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COLUMN: 'COLUMN'>"}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"fullname": "sqlglot.tokens.TokenType.COLUMN_DEF", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLUMN_DEF", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COLUMN_DEF: 'COLUMN_DEF'>"}, "sqlglot.tokens.TokenType.SCHEMA": {"fullname": "sqlglot.tokens.TokenType.SCHEMA", "modulename": "sqlglot.tokens", "qualname": "TokenType.SCHEMA", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SCHEMA: 'SCHEMA'>"}, "sqlglot.tokens.TokenType.TABLE": {"fullname": "sqlglot.tokens.TokenType.TABLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TABLE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TABLE: 'TABLE'>"}, "sqlglot.tokens.TokenType.VAR": {"fullname": "sqlglot.tokens.TokenType.VAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.VAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VAR: 'VAR'>"}, "sqlglot.tokens.TokenType.BIT_STRING": {"fullname": "sqlglot.tokens.TokenType.BIT_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIT_STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BIT_STRING: 'BIT_STRING'>"}, "sqlglot.tokens.TokenType.HEX_STRING": {"fullname": "sqlglot.tokens.TokenType.HEX_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.HEX_STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HEX_STRING: 'HEX_STRING'>"}, "sqlglot.tokens.TokenType.BYTE_STRING": {"fullname": "sqlglot.tokens.TokenType.BYTE_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.BYTE_STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BYTE_STRING: 'BYTE_STRING'>"}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"fullname": "sqlglot.tokens.TokenType.NATIONAL_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.NATIONAL_STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NATIONAL_STRING: 'NATIONAL_STRING'>"}, "sqlglot.tokens.TokenType.RAW_STRING": {"fullname": "sqlglot.tokens.TokenType.RAW_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.RAW_STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RAW_STRING: 'RAW_STRING'>"}, "sqlglot.tokens.TokenType.BIT": {"fullname": "sqlglot.tokens.TokenType.BIT", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BIT: 'BIT'>"}, "sqlglot.tokens.TokenType.BOOLEAN": {"fullname": "sqlglot.tokens.TokenType.BOOLEAN", "modulename": "sqlglot.tokens", "qualname": "TokenType.BOOLEAN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BOOLEAN: 'BOOLEAN'>"}, "sqlglot.tokens.TokenType.TINYINT": {"fullname": "sqlglot.tokens.TokenType.TINYINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.TINYINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TINYINT: 'TINYINT'>"}, "sqlglot.tokens.TokenType.UTINYINT": {"fullname": "sqlglot.tokens.TokenType.UTINYINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.UTINYINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UTINYINT: 'UTINYINT'>"}, "sqlglot.tokens.TokenType.SMALLINT": {"fullname": "sqlglot.tokens.TokenType.SMALLINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.SMALLINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SMALLINT: 'SMALLINT'>"}, "sqlglot.tokens.TokenType.USMALLINT": {"fullname": "sqlglot.tokens.TokenType.USMALLINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.USMALLINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.USMALLINT: 'USMALLINT'>"}, "sqlglot.tokens.TokenType.INT": {"fullname": "sqlglot.tokens.TokenType.INT", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT: 'INT'>"}, "sqlglot.tokens.TokenType.UINT": {"fullname": "sqlglot.tokens.TokenType.UINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.UINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UINT: 'UINT'>"}, "sqlglot.tokens.TokenType.BIGINT": {"fullname": "sqlglot.tokens.TokenType.BIGINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIGINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BIGINT: 'BIGINT'>"}, "sqlglot.tokens.TokenType.UBIGINT": {"fullname": "sqlglot.tokens.TokenType.UBIGINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.UBIGINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UBIGINT: 'UBIGINT'>"}, "sqlglot.tokens.TokenType.INT128": {"fullname": "sqlglot.tokens.TokenType.INT128", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT128", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT128: 'INT128'>"}, "sqlglot.tokens.TokenType.UINT128": {"fullname": "sqlglot.tokens.TokenType.UINT128", "modulename": "sqlglot.tokens", "qualname": "TokenType.UINT128", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UINT128: 'UINT128'>"}, "sqlglot.tokens.TokenType.INT256": {"fullname": "sqlglot.tokens.TokenType.INT256", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT256", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT256: 'INT256'>"}, "sqlglot.tokens.TokenType.UINT256": {"fullname": "sqlglot.tokens.TokenType.UINT256", "modulename": "sqlglot.tokens", "qualname": "TokenType.UINT256", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UINT256: 'UINT256'>"}, "sqlglot.tokens.TokenType.FLOAT": {"fullname": "sqlglot.tokens.TokenType.FLOAT", "modulename": "sqlglot.tokens", "qualname": "TokenType.FLOAT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FLOAT: 'FLOAT'>"}, "sqlglot.tokens.TokenType.DOUBLE": {"fullname": "sqlglot.tokens.TokenType.DOUBLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DOUBLE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DOUBLE: 'DOUBLE'>"}, "sqlglot.tokens.TokenType.DECIMAL": {"fullname": "sqlglot.tokens.TokenType.DECIMAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.DECIMAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DECIMAL: 'DECIMAL'>"}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"fullname": "sqlglot.tokens.TokenType.BIGDECIMAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIGDECIMAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BIGDECIMAL: 'BIGDECIMAL'>"}, "sqlglot.tokens.TokenType.CHAR": {"fullname": "sqlglot.tokens.TokenType.CHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.CHAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CHAR: 'CHAR'>"}, "sqlglot.tokens.TokenType.NCHAR": {"fullname": "sqlglot.tokens.TokenType.NCHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.NCHAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NCHAR: 'NCHAR'>"}, "sqlglot.tokens.TokenType.VARCHAR": {"fullname": "sqlglot.tokens.TokenType.VARCHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.VARCHAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VARCHAR: 'VARCHAR'>"}, "sqlglot.tokens.TokenType.NVARCHAR": {"fullname": "sqlglot.tokens.TokenType.NVARCHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.NVARCHAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NVARCHAR: 'NVARCHAR'>"}, "sqlglot.tokens.TokenType.TEXT": {"fullname": "sqlglot.tokens.TokenType.TEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.TEXT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TEXT: 'TEXT'>"}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"fullname": "sqlglot.tokens.TokenType.MEDIUMTEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.MEDIUMTEXT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>"}, "sqlglot.tokens.TokenType.LONGTEXT": {"fullname": "sqlglot.tokens.TokenType.LONGTEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LONGTEXT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LONGTEXT: 'LONGTEXT'>"}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"fullname": "sqlglot.tokens.TokenType.MEDIUMBLOB", "modulename": "sqlglot.tokens", "qualname": "TokenType.MEDIUMBLOB", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>"}, "sqlglot.tokens.TokenType.LONGBLOB": {"fullname": "sqlglot.tokens.TokenType.LONGBLOB", "modulename": "sqlglot.tokens", "qualname": "TokenType.LONGBLOB", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LONGBLOB: 'LONGBLOB'>"}, "sqlglot.tokens.TokenType.BINARY": {"fullname": "sqlglot.tokens.TokenType.BINARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.BINARY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BINARY: 'BINARY'>"}, "sqlglot.tokens.TokenType.VARBINARY": {"fullname": "sqlglot.tokens.TokenType.VARBINARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.VARBINARY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VARBINARY: 'VARBINARY'>"}, "sqlglot.tokens.TokenType.JSON": {"fullname": "sqlglot.tokens.TokenType.JSON", "modulename": "sqlglot.tokens", "qualname": "TokenType.JSON", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.JSON: 'JSON'>"}, "sqlglot.tokens.TokenType.JSONB": {"fullname": "sqlglot.tokens.TokenType.JSONB", "modulename": "sqlglot.tokens", "qualname": "TokenType.JSONB", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.JSONB: 'JSONB'>"}, "sqlglot.tokens.TokenType.TIME": {"fullname": "sqlglot.tokens.TokenType.TIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIME", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TIME: 'TIME'>"}, "sqlglot.tokens.TokenType.TIMESTAMP": {"fullname": "sqlglot.tokens.TokenType.TIMESTAMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIMESTAMP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TIMESTAMP: 'TIMESTAMP'>"}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"fullname": "sqlglot.tokens.TokenType.TIMESTAMPTZ", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIMESTAMPTZ", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>"}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"fullname": "sqlglot.tokens.TokenType.TIMESTAMPLTZ", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIMESTAMPLTZ", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>"}, "sqlglot.tokens.TokenType.DATETIME": {"fullname": "sqlglot.tokens.TokenType.DATETIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATETIME", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATETIME: 'DATETIME'>"}, "sqlglot.tokens.TokenType.DATETIME64": {"fullname": "sqlglot.tokens.TokenType.DATETIME64", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATETIME64", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATETIME64: 'DATETIME64'>"}, "sqlglot.tokens.TokenType.DATE": {"fullname": "sqlglot.tokens.TokenType.DATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATE: 'DATE'>"}, "sqlglot.tokens.TokenType.INT4RANGE": {"fullname": "sqlglot.tokens.TokenType.INT4RANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT4RANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT4RANGE: 'INT4RANGE'>"}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.INT4MULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT4MULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>"}, "sqlglot.tokens.TokenType.INT8RANGE": {"fullname": "sqlglot.tokens.TokenType.INT8RANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT8RANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT8RANGE: 'INT8RANGE'>"}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.INT8MULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT8MULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>"}, "sqlglot.tokens.TokenType.NUMRANGE": {"fullname": "sqlglot.tokens.TokenType.NUMRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.NUMRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NUMRANGE: 'NUMRANGE'>"}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.NUMMULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.NUMMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>"}, "sqlglot.tokens.TokenType.TSRANGE": {"fullname": "sqlglot.tokens.TokenType.TSRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TSRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TSRANGE: 'TSRANGE'>"}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.TSMULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TSMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>"}, "sqlglot.tokens.TokenType.TSTZRANGE": {"fullname": "sqlglot.tokens.TokenType.TSTZRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TSTZRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TSTZRANGE: 'TSTZRANGE'>"}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.TSTZMULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TSTZMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>"}, "sqlglot.tokens.TokenType.DATERANGE": {"fullname": "sqlglot.tokens.TokenType.DATERANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATERANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATERANGE: 'DATERANGE'>"}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.DATEMULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATEMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>"}, "sqlglot.tokens.TokenType.UUID": {"fullname": "sqlglot.tokens.TokenType.UUID", "modulename": "sqlglot.tokens", "qualname": "TokenType.UUID", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UUID: 'UUID'>"}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"fullname": "sqlglot.tokens.TokenType.GEOGRAPHY", "modulename": "sqlglot.tokens", "qualname": "TokenType.GEOGRAPHY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GEOGRAPHY: 'GEOGRAPHY'>"}, "sqlglot.tokens.TokenType.NULLABLE": {"fullname": "sqlglot.tokens.TokenType.NULLABLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULLABLE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NULLABLE: 'NULLABLE'>"}, "sqlglot.tokens.TokenType.GEOMETRY": {"fullname": "sqlglot.tokens.TokenType.GEOMETRY", "modulename": "sqlglot.tokens", "qualname": "TokenType.GEOMETRY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GEOMETRY: 'GEOMETRY'>"}, "sqlglot.tokens.TokenType.HLLSKETCH": {"fullname": "sqlglot.tokens.TokenType.HLLSKETCH", "modulename": "sqlglot.tokens", "qualname": "TokenType.HLLSKETCH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HLLSKETCH: 'HLLSKETCH'>"}, "sqlglot.tokens.TokenType.HSTORE": {"fullname": "sqlglot.tokens.TokenType.HSTORE", "modulename": "sqlglot.tokens", "qualname": "TokenType.HSTORE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HSTORE: 'HSTORE'>"}, "sqlglot.tokens.TokenType.SUPER": {"fullname": "sqlglot.tokens.TokenType.SUPER", "modulename": "sqlglot.tokens", "qualname": "TokenType.SUPER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SUPER: 'SUPER'>"}, "sqlglot.tokens.TokenType.SERIAL": {"fullname": "sqlglot.tokens.TokenType.SERIAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.SERIAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SERIAL: 'SERIAL'>"}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"fullname": "sqlglot.tokens.TokenType.SMALLSERIAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.SMALLSERIAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SMALLSERIAL: 'SMALLSERIAL'>"}, "sqlglot.tokens.TokenType.BIGSERIAL": {"fullname": "sqlglot.tokens.TokenType.BIGSERIAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIGSERIAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BIGSERIAL: 'BIGSERIAL'>"}, "sqlglot.tokens.TokenType.XML": {"fullname": "sqlglot.tokens.TokenType.XML", "modulename": "sqlglot.tokens", "qualname": "TokenType.XML", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.XML: 'XML'>"}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"fullname": "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNIQUEIDENTIFIER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>"}, "sqlglot.tokens.TokenType.MONEY": {"fullname": "sqlglot.tokens.TokenType.MONEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.MONEY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MONEY: 'MONEY'>"}, "sqlglot.tokens.TokenType.SMALLMONEY": {"fullname": "sqlglot.tokens.TokenType.SMALLMONEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.SMALLMONEY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SMALLMONEY: 'SMALLMONEY'>"}, "sqlglot.tokens.TokenType.ROWVERSION": {"fullname": "sqlglot.tokens.TokenType.ROWVERSION", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROWVERSION", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ROWVERSION: 'ROWVERSION'>"}, "sqlglot.tokens.TokenType.IMAGE": {"fullname": "sqlglot.tokens.TokenType.IMAGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.IMAGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IMAGE: 'IMAGE'>"}, "sqlglot.tokens.TokenType.VARIANT": {"fullname": "sqlglot.tokens.TokenType.VARIANT", "modulename": "sqlglot.tokens", "qualname": "TokenType.VARIANT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VARIANT: 'VARIANT'>"}, "sqlglot.tokens.TokenType.OBJECT": {"fullname": "sqlglot.tokens.TokenType.OBJECT", "modulename": "sqlglot.tokens", "qualname": "TokenType.OBJECT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OBJECT: 'OBJECT'>"}, "sqlglot.tokens.TokenType.INET": {"fullname": "sqlglot.tokens.TokenType.INET", "modulename": "sqlglot.tokens", "qualname": "TokenType.INET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INET: 'INET'>"}, "sqlglot.tokens.TokenType.ALIAS": {"fullname": "sqlglot.tokens.TokenType.ALIAS", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALIAS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ALIAS: 'ALIAS'>"}, "sqlglot.tokens.TokenType.ALTER": {"fullname": "sqlglot.tokens.TokenType.ALTER", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALTER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ALTER: 'ALTER'>"}, "sqlglot.tokens.TokenType.ALWAYS": {"fullname": "sqlglot.tokens.TokenType.ALWAYS", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALWAYS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ALWAYS: 'ALWAYS'>"}, "sqlglot.tokens.TokenType.ALL": {"fullname": "sqlglot.tokens.TokenType.ALL", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ALL: 'ALL'>"}, "sqlglot.tokens.TokenType.ANTI": {"fullname": "sqlglot.tokens.TokenType.ANTI", "modulename": "sqlglot.tokens", "qualname": "TokenType.ANTI", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ANTI: 'ANTI'>"}, "sqlglot.tokens.TokenType.ANY": {"fullname": "sqlglot.tokens.TokenType.ANY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ANY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ANY: 'ANY'>"}, "sqlglot.tokens.TokenType.APPLY": {"fullname": "sqlglot.tokens.TokenType.APPLY", "modulename": "sqlglot.tokens", "qualname": "TokenType.APPLY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.APPLY: 'APPLY'>"}, "sqlglot.tokens.TokenType.ARRAY": {"fullname": "sqlglot.tokens.TokenType.ARRAY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ARRAY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ARRAY: 'ARRAY'>"}, "sqlglot.tokens.TokenType.ASC": {"fullname": "sqlglot.tokens.TokenType.ASC", "modulename": "sqlglot.tokens", "qualname": "TokenType.ASC", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ASC: 'ASC'>"}, "sqlglot.tokens.TokenType.ASOF": {"fullname": "sqlglot.tokens.TokenType.ASOF", "modulename": "sqlglot.tokens", "qualname": "TokenType.ASOF", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ASOF: 'ASOF'>"}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"fullname": "sqlglot.tokens.TokenType.AUTO_INCREMENT", "modulename": "sqlglot.tokens", "qualname": "TokenType.AUTO_INCREMENT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>"}, "sqlglot.tokens.TokenType.BEGIN": {"fullname": "sqlglot.tokens.TokenType.BEGIN", "modulename": "sqlglot.tokens", "qualname": "TokenType.BEGIN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BEGIN: 'BEGIN'>"}, "sqlglot.tokens.TokenType.BETWEEN": {"fullname": "sqlglot.tokens.TokenType.BETWEEN", "modulename": "sqlglot.tokens", "qualname": "TokenType.BETWEEN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BETWEEN: 'BETWEEN'>"}, "sqlglot.tokens.TokenType.CACHE": {"fullname": "sqlglot.tokens.TokenType.CACHE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CACHE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CACHE: 'CACHE'>"}, "sqlglot.tokens.TokenType.CASE": {"fullname": "sqlglot.tokens.TokenType.CASE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CASE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CASE: 'CASE'>"}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"fullname": "sqlglot.tokens.TokenType.CHARACTER_SET", "modulename": "sqlglot.tokens", "qualname": "TokenType.CHARACTER_SET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CHARACTER_SET: 'CHARACTER_SET'>"}, "sqlglot.tokens.TokenType.COLLATE": {"fullname": "sqlglot.tokens.TokenType.COLLATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLLATE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COLLATE: 'COLLATE'>"}, "sqlglot.tokens.TokenType.COMMAND": {"fullname": "sqlglot.tokens.TokenType.COMMAND", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMAND", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COMMAND: 'COMMAND'>"}, "sqlglot.tokens.TokenType.COMMENT": {"fullname": "sqlglot.tokens.TokenType.COMMENT", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMENT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COMMENT: 'COMMENT'>"}, "sqlglot.tokens.TokenType.COMMIT": {"fullname": "sqlglot.tokens.TokenType.COMMIT", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMIT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COMMIT: 'COMMIT'>"}, "sqlglot.tokens.TokenType.CONSTRAINT": {"fullname": "sqlglot.tokens.TokenType.CONSTRAINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.CONSTRAINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CONSTRAINT: 'CONSTRAINT'>"}, "sqlglot.tokens.TokenType.CREATE": {"fullname": "sqlglot.tokens.TokenType.CREATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CREATE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CREATE: 'CREATE'>"}, "sqlglot.tokens.TokenType.CROSS": {"fullname": "sqlglot.tokens.TokenType.CROSS", "modulename": "sqlglot.tokens", "qualname": "TokenType.CROSS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CROSS: 'CROSS'>"}, "sqlglot.tokens.TokenType.CUBE": {"fullname": "sqlglot.tokens.TokenType.CUBE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CUBE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CUBE: 'CUBE'>"}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"fullname": "sqlglot.tokens.TokenType.CURRENT_DATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_DATE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CURRENT_DATE: 'CURRENT_DATE'>"}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"fullname": "sqlglot.tokens.TokenType.CURRENT_DATETIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_DATETIME", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>"}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"fullname": "sqlglot.tokens.TokenType.CURRENT_TIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_TIME", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CURRENT_TIME: 'CURRENT_TIME'>"}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"fullname": "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_TIMESTAMP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>"}, "sqlglot.tokens.TokenType.CURRENT_USER": {"fullname": "sqlglot.tokens.TokenType.CURRENT_USER", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_USER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CURRENT_USER: 'CURRENT_USER'>"}, "sqlglot.tokens.TokenType.DEFAULT": {"fullname": "sqlglot.tokens.TokenType.DEFAULT", "modulename": "sqlglot.tokens", "qualname": "TokenType.DEFAULT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DEFAULT: 'DEFAULT'>"}, "sqlglot.tokens.TokenType.DELETE": {"fullname": "sqlglot.tokens.TokenType.DELETE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DELETE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DELETE: 'DELETE'>"}, "sqlglot.tokens.TokenType.DESC": {"fullname": "sqlglot.tokens.TokenType.DESC", "modulename": "sqlglot.tokens", "qualname": "TokenType.DESC", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DESC: 'DESC'>"}, "sqlglot.tokens.TokenType.DESCRIBE": {"fullname": "sqlglot.tokens.TokenType.DESCRIBE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DESCRIBE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DESCRIBE: 'DESCRIBE'>"}, "sqlglot.tokens.TokenType.DICTIONARY": {"fullname": "sqlglot.tokens.TokenType.DICTIONARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.DICTIONARY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DICTIONARY: 'DICTIONARY'>"}, "sqlglot.tokens.TokenType.DISTINCT": {"fullname": "sqlglot.tokens.TokenType.DISTINCT", "modulename": "sqlglot.tokens", "qualname": "TokenType.DISTINCT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DISTINCT: 'DISTINCT'>"}, "sqlglot.tokens.TokenType.DIV": {"fullname": "sqlglot.tokens.TokenType.DIV", "modulename": "sqlglot.tokens", "qualname": "TokenType.DIV", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DIV: 'DIV'>"}, "sqlglot.tokens.TokenType.DROP": {"fullname": "sqlglot.tokens.TokenType.DROP", "modulename": "sqlglot.tokens", "qualname": "TokenType.DROP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DROP: 'DROP'>"}, "sqlglot.tokens.TokenType.ELSE": {"fullname": "sqlglot.tokens.TokenType.ELSE", "modulename": "sqlglot.tokens", "qualname": "TokenType.ELSE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ELSE: 'ELSE'>"}, "sqlglot.tokens.TokenType.END": {"fullname": "sqlglot.tokens.TokenType.END", "modulename": "sqlglot.tokens", "qualname": "TokenType.END", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.END: 'END'>"}, "sqlglot.tokens.TokenType.ESCAPE": {"fullname": "sqlglot.tokens.TokenType.ESCAPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.ESCAPE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ESCAPE: 'ESCAPE'>"}, "sqlglot.tokens.TokenType.EXCEPT": {"fullname": "sqlglot.tokens.TokenType.EXCEPT", "modulename": "sqlglot.tokens", "qualname": "TokenType.EXCEPT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.EXCEPT: 'EXCEPT'>"}, "sqlglot.tokens.TokenType.EXECUTE": {"fullname": "sqlglot.tokens.TokenType.EXECUTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.EXECUTE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.EXECUTE: 'EXECUTE'>"}, "sqlglot.tokens.TokenType.EXISTS": {"fullname": "sqlglot.tokens.TokenType.EXISTS", "modulename": "sqlglot.tokens", "qualname": "TokenType.EXISTS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.EXISTS: 'EXISTS'>"}, "sqlglot.tokens.TokenType.FALSE": {"fullname": "sqlglot.tokens.TokenType.FALSE", "modulename": "sqlglot.tokens", "qualname": "TokenType.FALSE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FALSE: 'FALSE'>"}, "sqlglot.tokens.TokenType.FETCH": {"fullname": "sqlglot.tokens.TokenType.FETCH", "modulename": "sqlglot.tokens", "qualname": "TokenType.FETCH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FETCH: 'FETCH'>"}, "sqlglot.tokens.TokenType.FILTER": {"fullname": "sqlglot.tokens.TokenType.FILTER", "modulename": "sqlglot.tokens", "qualname": "TokenType.FILTER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FILTER: 'FILTER'>"}, "sqlglot.tokens.TokenType.FINAL": {"fullname": "sqlglot.tokens.TokenType.FINAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.FINAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FINAL: 'FINAL'>"}, "sqlglot.tokens.TokenType.FIRST": {"fullname": "sqlglot.tokens.TokenType.FIRST", "modulename": "sqlglot.tokens", "qualname": "TokenType.FIRST", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FIRST: 'FIRST'>"}, "sqlglot.tokens.TokenType.FOR": {"fullname": "sqlglot.tokens.TokenType.FOR", "modulename": "sqlglot.tokens", "qualname": "TokenType.FOR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FOR: 'FOR'>"}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"fullname": "sqlglot.tokens.TokenType.FOREIGN_KEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.FOREIGN_KEY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>"}, "sqlglot.tokens.TokenType.FORMAT": {"fullname": "sqlglot.tokens.TokenType.FORMAT", "modulename": "sqlglot.tokens", "qualname": "TokenType.FORMAT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FORMAT: 'FORMAT'>"}, "sqlglot.tokens.TokenType.FROM": {"fullname": "sqlglot.tokens.TokenType.FROM", "modulename": "sqlglot.tokens", "qualname": "TokenType.FROM", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FROM: 'FROM'>"}, "sqlglot.tokens.TokenType.FULL": {"fullname": "sqlglot.tokens.TokenType.FULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.FULL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FULL: 'FULL'>"}, "sqlglot.tokens.TokenType.FUNCTION": {"fullname": "sqlglot.tokens.TokenType.FUNCTION", "modulename": "sqlglot.tokens", "qualname": "TokenType.FUNCTION", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FUNCTION: 'FUNCTION'>"}, "sqlglot.tokens.TokenType.GLOB": {"fullname": "sqlglot.tokens.TokenType.GLOB", "modulename": "sqlglot.tokens", "qualname": "TokenType.GLOB", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GLOB: 'GLOB'>"}, "sqlglot.tokens.TokenType.GLOBAL": {"fullname": "sqlglot.tokens.TokenType.GLOBAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.GLOBAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GLOBAL: 'GLOBAL'>"}, "sqlglot.tokens.TokenType.GROUP_BY": {"fullname": "sqlglot.tokens.TokenType.GROUP_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.GROUP_BY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GROUP_BY: 'GROUP_BY'>"}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"fullname": "sqlglot.tokens.TokenType.GROUPING_SETS", "modulename": "sqlglot.tokens", "qualname": "TokenType.GROUPING_SETS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GROUPING_SETS: 'GROUPING_SETS'>"}, "sqlglot.tokens.TokenType.HAVING": {"fullname": "sqlglot.tokens.TokenType.HAVING", "modulename": "sqlglot.tokens", "qualname": "TokenType.HAVING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HAVING: 'HAVING'>"}, "sqlglot.tokens.TokenType.HINT": {"fullname": "sqlglot.tokens.TokenType.HINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.HINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HINT: 'HINT'>"}, "sqlglot.tokens.TokenType.IF": {"fullname": "sqlglot.tokens.TokenType.IF", "modulename": "sqlglot.tokens", "qualname": "TokenType.IF", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IF: 'IF'>"}, "sqlglot.tokens.TokenType.ILIKE": {"fullname": "sqlglot.tokens.TokenType.ILIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.ILIKE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ILIKE: 'ILIKE'>"}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"fullname": "sqlglot.tokens.TokenType.ILIKE_ANY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ILIKE_ANY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ILIKE_ANY: 'ILIKE_ANY'>"}, "sqlglot.tokens.TokenType.IN": {"fullname": "sqlglot.tokens.TokenType.IN", "modulename": "sqlglot.tokens", "qualname": "TokenType.IN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IN: 'IN'>"}, "sqlglot.tokens.TokenType.INDEX": {"fullname": "sqlglot.tokens.TokenType.INDEX", "modulename": "sqlglot.tokens", "qualname": "TokenType.INDEX", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INDEX: 'INDEX'>"}, "sqlglot.tokens.TokenType.INNER": {"fullname": "sqlglot.tokens.TokenType.INNER", "modulename": "sqlglot.tokens", "qualname": "TokenType.INNER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INNER: 'INNER'>"}, "sqlglot.tokens.TokenType.INSERT": {"fullname": "sqlglot.tokens.TokenType.INSERT", "modulename": "sqlglot.tokens", "qualname": "TokenType.INSERT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INSERT: 'INSERT'>"}, "sqlglot.tokens.TokenType.INTERSECT": {"fullname": "sqlglot.tokens.TokenType.INTERSECT", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTERSECT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INTERSECT: 'INTERSECT'>"}, "sqlglot.tokens.TokenType.INTERVAL": {"fullname": "sqlglot.tokens.TokenType.INTERVAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTERVAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INTERVAL: 'INTERVAL'>"}, "sqlglot.tokens.TokenType.INTO": {"fullname": "sqlglot.tokens.TokenType.INTO", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTO", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INTO: 'INTO'>"}, "sqlglot.tokens.TokenType.INTRODUCER": {"fullname": "sqlglot.tokens.TokenType.INTRODUCER", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTRODUCER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INTRODUCER: 'INTRODUCER'>"}, "sqlglot.tokens.TokenType.IRLIKE": {"fullname": "sqlglot.tokens.TokenType.IRLIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.IRLIKE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IRLIKE: 'IRLIKE'>"}, "sqlglot.tokens.TokenType.IS": {"fullname": "sqlglot.tokens.TokenType.IS", "modulename": "sqlglot.tokens", "qualname": "TokenType.IS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IS: 'IS'>"}, "sqlglot.tokens.TokenType.ISNULL": {"fullname": "sqlglot.tokens.TokenType.ISNULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.ISNULL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ISNULL: 'ISNULL'>"}, "sqlglot.tokens.TokenType.JOIN": {"fullname": "sqlglot.tokens.TokenType.JOIN", "modulename": "sqlglot.tokens", "qualname": "TokenType.JOIN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.JOIN: 'JOIN'>"}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"fullname": "sqlglot.tokens.TokenType.JOIN_MARKER", "modulename": "sqlglot.tokens", "qualname": "TokenType.JOIN_MARKER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.JOIN_MARKER: 'JOIN_MARKER'>"}, "sqlglot.tokens.TokenType.KEEP": {"fullname": "sqlglot.tokens.TokenType.KEEP", "modulename": "sqlglot.tokens", "qualname": "TokenType.KEEP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.KEEP: 'KEEP'>"}, "sqlglot.tokens.TokenType.LANGUAGE": {"fullname": "sqlglot.tokens.TokenType.LANGUAGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.LANGUAGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LANGUAGE: 'LANGUAGE'>"}, "sqlglot.tokens.TokenType.LATERAL": {"fullname": "sqlglot.tokens.TokenType.LATERAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.LATERAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LATERAL: 'LATERAL'>"}, "sqlglot.tokens.TokenType.LEFT": {"fullname": "sqlglot.tokens.TokenType.LEFT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LEFT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LEFT: 'LEFT'>"}, "sqlglot.tokens.TokenType.LIKE": {"fullname": "sqlglot.tokens.TokenType.LIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.LIKE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LIKE: 'LIKE'>"}, "sqlglot.tokens.TokenType.LIKE_ANY": {"fullname": "sqlglot.tokens.TokenType.LIKE_ANY", "modulename": "sqlglot.tokens", "qualname": "TokenType.LIKE_ANY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LIKE_ANY: 'LIKE_ANY'>"}, "sqlglot.tokens.TokenType.LIMIT": {"fullname": "sqlglot.tokens.TokenType.LIMIT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LIMIT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LIMIT: 'LIMIT'>"}, "sqlglot.tokens.TokenType.LOAD": {"fullname": "sqlglot.tokens.TokenType.LOAD", "modulename": "sqlglot.tokens", "qualname": "TokenType.LOAD", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LOAD: 'LOAD'>"}, "sqlglot.tokens.TokenType.LOCK": {"fullname": "sqlglot.tokens.TokenType.LOCK", "modulename": "sqlglot.tokens", "qualname": "TokenType.LOCK", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LOCK: 'LOCK'>"}, "sqlglot.tokens.TokenType.MAP": {"fullname": "sqlglot.tokens.TokenType.MAP", "modulename": "sqlglot.tokens", "qualname": "TokenType.MAP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MAP: 'MAP'>"}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"fullname": "sqlglot.tokens.TokenType.MATCH_RECOGNIZE", "modulename": "sqlglot.tokens", "qualname": "TokenType.MATCH_RECOGNIZE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>"}, "sqlglot.tokens.TokenType.MERGE": {"fullname": "sqlglot.tokens.TokenType.MERGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.MERGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MERGE: 'MERGE'>"}, "sqlglot.tokens.TokenType.MOD": {"fullname": "sqlglot.tokens.TokenType.MOD", "modulename": "sqlglot.tokens", "qualname": "TokenType.MOD", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MOD: 'MOD'>"}, "sqlglot.tokens.TokenType.NATURAL": {"fullname": "sqlglot.tokens.TokenType.NATURAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.NATURAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NATURAL: 'NATURAL'>"}, "sqlglot.tokens.TokenType.NEXT": {"fullname": "sqlglot.tokens.TokenType.NEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.NEXT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NEXT: 'NEXT'>"}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"fullname": "sqlglot.tokens.TokenType.NEXT_VALUE_FOR", "modulename": "sqlglot.tokens", "qualname": "TokenType.NEXT_VALUE_FOR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NEXT_VALUE_FOR: 'NEXT_VALUE_FOR'>"}, "sqlglot.tokens.TokenType.NOTNULL": {"fullname": "sqlglot.tokens.TokenType.NOTNULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.NOTNULL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NOTNULL: 'NOTNULL'>"}, "sqlglot.tokens.TokenType.NULL": {"fullname": "sqlglot.tokens.TokenType.NULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NULL: 'NULL'>"}, "sqlglot.tokens.TokenType.OFFSET": {"fullname": "sqlglot.tokens.TokenType.OFFSET", "modulename": "sqlglot.tokens", "qualname": "TokenType.OFFSET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OFFSET: 'OFFSET'>"}, "sqlglot.tokens.TokenType.ON": {"fullname": "sqlglot.tokens.TokenType.ON", "modulename": "sqlglot.tokens", "qualname": "TokenType.ON", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ON: 'ON'>"}, "sqlglot.tokens.TokenType.ORDER_BY": {"fullname": "sqlglot.tokens.TokenType.ORDER_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ORDER_BY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ORDER_BY: 'ORDER_BY'>"}, "sqlglot.tokens.TokenType.ORDERED": {"fullname": "sqlglot.tokens.TokenType.ORDERED", "modulename": "sqlglot.tokens", "qualname": "TokenType.ORDERED", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ORDERED: 'ORDERED'>"}, "sqlglot.tokens.TokenType.ORDINALITY": {"fullname": "sqlglot.tokens.TokenType.ORDINALITY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ORDINALITY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ORDINALITY: 'ORDINALITY'>"}, "sqlglot.tokens.TokenType.OUTER": {"fullname": "sqlglot.tokens.TokenType.OUTER", "modulename": "sqlglot.tokens", "qualname": "TokenType.OUTER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OUTER: 'OUTER'>"}, "sqlglot.tokens.TokenType.OVER": {"fullname": "sqlglot.tokens.TokenType.OVER", "modulename": "sqlglot.tokens", "qualname": "TokenType.OVER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OVER: 'OVER'>"}, "sqlglot.tokens.TokenType.OVERLAPS": {"fullname": "sqlglot.tokens.TokenType.OVERLAPS", "modulename": "sqlglot.tokens", "qualname": "TokenType.OVERLAPS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OVERLAPS: 'OVERLAPS'>"}, "sqlglot.tokens.TokenType.OVERWRITE": {"fullname": "sqlglot.tokens.TokenType.OVERWRITE", "modulename": "sqlglot.tokens", "qualname": "TokenType.OVERWRITE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OVERWRITE: 'OVERWRITE'>"}, "sqlglot.tokens.TokenType.PARTITION": {"fullname": "sqlglot.tokens.TokenType.PARTITION", "modulename": "sqlglot.tokens", "qualname": "TokenType.PARTITION", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PARTITION: 'PARTITION'>"}, "sqlglot.tokens.TokenType.PARTITION_BY": {"fullname": "sqlglot.tokens.TokenType.PARTITION_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.PARTITION_BY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PARTITION_BY: 'PARTITION_BY'>"}, "sqlglot.tokens.TokenType.PERCENT": {"fullname": "sqlglot.tokens.TokenType.PERCENT", "modulename": "sqlglot.tokens", "qualname": "TokenType.PERCENT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PERCENT: 'PERCENT'>"}, "sqlglot.tokens.TokenType.PIVOT": {"fullname": "sqlglot.tokens.TokenType.PIVOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.PIVOT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PIVOT: 'PIVOT'>"}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"fullname": "sqlglot.tokens.TokenType.PLACEHOLDER", "modulename": "sqlglot.tokens", "qualname": "TokenType.PLACEHOLDER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PLACEHOLDER: 'PLACEHOLDER'>"}, "sqlglot.tokens.TokenType.PRAGMA": {"fullname": "sqlglot.tokens.TokenType.PRAGMA", "modulename": "sqlglot.tokens", "qualname": "TokenType.PRAGMA", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PRAGMA: 'PRAGMA'>"}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"fullname": "sqlglot.tokens.TokenType.PRIMARY_KEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.PRIMARY_KEY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>"}, "sqlglot.tokens.TokenType.PROCEDURE": {"fullname": "sqlglot.tokens.TokenType.PROCEDURE", "modulename": "sqlglot.tokens", "qualname": "TokenType.PROCEDURE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PROCEDURE: 'PROCEDURE'>"}, "sqlglot.tokens.TokenType.PROPERTIES": {"fullname": "sqlglot.tokens.TokenType.PROPERTIES", "modulename": "sqlglot.tokens", "qualname": "TokenType.PROPERTIES", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PROPERTIES: 'PROPERTIES'>"}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"fullname": "sqlglot.tokens.TokenType.PSEUDO_TYPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.PSEUDO_TYPE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>"}, "sqlglot.tokens.TokenType.QUALIFY": {"fullname": "sqlglot.tokens.TokenType.QUALIFY", "modulename": "sqlglot.tokens", "qualname": "TokenType.QUALIFY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.QUALIFY: 'QUALIFY'>"}, "sqlglot.tokens.TokenType.QUOTE": {"fullname": "sqlglot.tokens.TokenType.QUOTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.QUOTE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.QUOTE: 'QUOTE'>"}, "sqlglot.tokens.TokenType.RANGE": {"fullname": "sqlglot.tokens.TokenType.RANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.RANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RANGE: 'RANGE'>"}, "sqlglot.tokens.TokenType.RECURSIVE": {"fullname": "sqlglot.tokens.TokenType.RECURSIVE", "modulename": "sqlglot.tokens", "qualname": "TokenType.RECURSIVE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RECURSIVE: 'RECURSIVE'>"}, "sqlglot.tokens.TokenType.REPLACE": {"fullname": "sqlglot.tokens.TokenType.REPLACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.REPLACE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.REPLACE: 'REPLACE'>"}, "sqlglot.tokens.TokenType.RETURNING": {"fullname": "sqlglot.tokens.TokenType.RETURNING", "modulename": "sqlglot.tokens", "qualname": "TokenType.RETURNING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RETURNING: 'RETURNING'>"}, "sqlglot.tokens.TokenType.REFERENCES": {"fullname": "sqlglot.tokens.TokenType.REFERENCES", "modulename": "sqlglot.tokens", "qualname": "TokenType.REFERENCES", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.REFERENCES: 'REFERENCES'>"}, "sqlglot.tokens.TokenType.RIGHT": {"fullname": "sqlglot.tokens.TokenType.RIGHT", "modulename": "sqlglot.tokens", "qualname": "TokenType.RIGHT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RIGHT: 'RIGHT'>"}, "sqlglot.tokens.TokenType.RLIKE": {"fullname": "sqlglot.tokens.TokenType.RLIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.RLIKE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RLIKE: 'RLIKE'>"}, "sqlglot.tokens.TokenType.ROLLBACK": {"fullname": "sqlglot.tokens.TokenType.ROLLBACK", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROLLBACK", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ROLLBACK: 'ROLLBACK'>"}, "sqlglot.tokens.TokenType.ROLLUP": {"fullname": "sqlglot.tokens.TokenType.ROLLUP", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROLLUP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ROLLUP: 'ROLLUP'>"}, "sqlglot.tokens.TokenType.ROW": {"fullname": "sqlglot.tokens.TokenType.ROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ROW: 'ROW'>"}, "sqlglot.tokens.TokenType.ROWS": {"fullname": "sqlglot.tokens.TokenType.ROWS", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROWS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ROWS: 'ROWS'>"}, "sqlglot.tokens.TokenType.SELECT": {"fullname": "sqlglot.tokens.TokenType.SELECT", "modulename": "sqlglot.tokens", "qualname": "TokenType.SELECT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SELECT: 'SELECT'>"}, "sqlglot.tokens.TokenType.SEMI": {"fullname": "sqlglot.tokens.TokenType.SEMI", "modulename": "sqlglot.tokens", "qualname": "TokenType.SEMI", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SEMI: 'SEMI'>"}, "sqlglot.tokens.TokenType.SEPARATOR": {"fullname": "sqlglot.tokens.TokenType.SEPARATOR", "modulename": "sqlglot.tokens", "qualname": "TokenType.SEPARATOR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SEPARATOR: 'SEPARATOR'>"}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"fullname": "sqlglot.tokens.TokenType.SERDE_PROPERTIES", "modulename": "sqlglot.tokens", "qualname": "TokenType.SERDE_PROPERTIES", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>"}, "sqlglot.tokens.TokenType.SET": {"fullname": "sqlglot.tokens.TokenType.SET", "modulename": "sqlglot.tokens", "qualname": "TokenType.SET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SET: 'SET'>"}, "sqlglot.tokens.TokenType.SETTINGS": {"fullname": "sqlglot.tokens.TokenType.SETTINGS", "modulename": "sqlglot.tokens", "qualname": "TokenType.SETTINGS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SETTINGS: 'SETTINGS'>"}, "sqlglot.tokens.TokenType.SHOW": {"fullname": "sqlglot.tokens.TokenType.SHOW", "modulename": "sqlglot.tokens", "qualname": "TokenType.SHOW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SHOW: 'SHOW'>"}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"fullname": "sqlglot.tokens.TokenType.SIMILAR_TO", "modulename": "sqlglot.tokens", "qualname": "TokenType.SIMILAR_TO", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SIMILAR_TO: 'SIMILAR_TO'>"}, "sqlglot.tokens.TokenType.SOME": {"fullname": "sqlglot.tokens.TokenType.SOME", "modulename": "sqlglot.tokens", "qualname": "TokenType.SOME", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SOME: 'SOME'>"}, "sqlglot.tokens.TokenType.STRUCT": {"fullname": "sqlglot.tokens.TokenType.STRUCT", "modulename": "sqlglot.tokens", "qualname": "TokenType.STRUCT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.STRUCT: 'STRUCT'>"}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"fullname": "sqlglot.tokens.TokenType.TABLE_SAMPLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TABLE_SAMPLE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>"}, "sqlglot.tokens.TokenType.TEMPORARY": {"fullname": "sqlglot.tokens.TokenType.TEMPORARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.TEMPORARY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TEMPORARY: 'TEMPORARY'>"}, "sqlglot.tokens.TokenType.TOP": {"fullname": "sqlglot.tokens.TokenType.TOP", "modulename": "sqlglot.tokens", "qualname": "TokenType.TOP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TOP: 'TOP'>"}, "sqlglot.tokens.TokenType.THEN": {"fullname": "sqlglot.tokens.TokenType.THEN", "modulename": "sqlglot.tokens", "qualname": "TokenType.THEN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.THEN: 'THEN'>"}, "sqlglot.tokens.TokenType.TRUE": {"fullname": "sqlglot.tokens.TokenType.TRUE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TRUE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TRUE: 'TRUE'>"}, "sqlglot.tokens.TokenType.UNCACHE": {"fullname": "sqlglot.tokens.TokenType.UNCACHE", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNCACHE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNCACHE: 'UNCACHE'>"}, "sqlglot.tokens.TokenType.UNION": {"fullname": "sqlglot.tokens.TokenType.UNION", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNION", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNION: 'UNION'>"}, "sqlglot.tokens.TokenType.UNNEST": {"fullname": "sqlglot.tokens.TokenType.UNNEST", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNNEST", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNNEST: 'UNNEST'>"}, "sqlglot.tokens.TokenType.UNPIVOT": {"fullname": "sqlglot.tokens.TokenType.UNPIVOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNPIVOT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNPIVOT: 'UNPIVOT'>"}, "sqlglot.tokens.TokenType.UPDATE": {"fullname": "sqlglot.tokens.TokenType.UPDATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.UPDATE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UPDATE: 'UPDATE'>"}, "sqlglot.tokens.TokenType.USE": {"fullname": "sqlglot.tokens.TokenType.USE", "modulename": "sqlglot.tokens", "qualname": "TokenType.USE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.USE: 'USE'>"}, "sqlglot.tokens.TokenType.USING": {"fullname": "sqlglot.tokens.TokenType.USING", "modulename": "sqlglot.tokens", "qualname": "TokenType.USING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.USING: 'USING'>"}, "sqlglot.tokens.TokenType.VALUES": {"fullname": "sqlglot.tokens.TokenType.VALUES", "modulename": "sqlglot.tokens", "qualname": "TokenType.VALUES", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VALUES: 'VALUES'>"}, "sqlglot.tokens.TokenType.VIEW": {"fullname": "sqlglot.tokens.TokenType.VIEW", "modulename": "sqlglot.tokens", "qualname": "TokenType.VIEW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VIEW: 'VIEW'>"}, "sqlglot.tokens.TokenType.VOLATILE": {"fullname": "sqlglot.tokens.TokenType.VOLATILE", "modulename": "sqlglot.tokens", "qualname": "TokenType.VOLATILE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VOLATILE: 'VOLATILE'>"}, "sqlglot.tokens.TokenType.WHEN": {"fullname": "sqlglot.tokens.TokenType.WHEN", "modulename": "sqlglot.tokens", "qualname": "TokenType.WHEN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.WHEN: 'WHEN'>"}, "sqlglot.tokens.TokenType.WHERE": {"fullname": "sqlglot.tokens.TokenType.WHERE", "modulename": "sqlglot.tokens", "qualname": "TokenType.WHERE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.WHERE: 'WHERE'>"}, "sqlglot.tokens.TokenType.WINDOW": {"fullname": "sqlglot.tokens.TokenType.WINDOW", "modulename": "sqlglot.tokens", "qualname": "TokenType.WINDOW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.WINDOW: 'WINDOW'>"}, "sqlglot.tokens.TokenType.WITH": {"fullname": "sqlglot.tokens.TokenType.WITH", "modulename": "sqlglot.tokens", "qualname": "TokenType.WITH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.WITH: 'WITH'>"}, "sqlglot.tokens.TokenType.UNIQUE": {"fullname": "sqlglot.tokens.TokenType.UNIQUE", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNIQUE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNIQUE: 'UNIQUE'>"}, "sqlglot.tokens.Token": {"fullname": "sqlglot.tokens.Token", "modulename": "sqlglot.tokens", "qualname": "Token", "kind": "class", "doc": "

\n"}, "sqlglot.tokens.Token.__init__": {"fullname": "sqlglot.tokens.Token.__init__", "modulename": "sqlglot.tokens", "qualname": "Token.__init__", "kind": "function", "doc": "

Token initializer.

\n\n
Arguments:
\n\n
    \n
  • token_type: The TokenType Enum.
  • \n
  • text: The text of the token.
  • \n
  • line: The line that the token ends on.
  • \n
  • col: The column that the token ends on.
  • \n
  • start: The start index of the token.
  • \n
  • end: The ending index of the token.
  • \n
\n", "signature": "(\ttoken_type: sqlglot.tokens.TokenType,\ttext: str,\tline: int = 1,\tcol: int = 1,\tstart: int = 0,\tend: int = 0,\tcomments: List[str] = [])"}, "sqlglot.tokens.Token.number": {"fullname": "sqlglot.tokens.Token.number", "modulename": "sqlglot.tokens", "qualname": "Token.number", "kind": "function", "doc": "

Returns a NUMBER token with number as its text.

\n", "signature": "(cls, number: int) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Token.string": {"fullname": "sqlglot.tokens.Token.string", "modulename": "sqlglot.tokens", "qualname": "Token.string", "kind": "function", "doc": "

Returns a STRING token with string as its text.

\n", "signature": "(cls, string: str) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Token.identifier": {"fullname": "sqlglot.tokens.Token.identifier", "modulename": "sqlglot.tokens", "qualname": "Token.identifier", "kind": "function", "doc": "

Returns an IDENTIFIER token with identifier as its text.

\n", "signature": "(cls, identifier: str) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Token.var": {"fullname": "sqlglot.tokens.Token.var", "modulename": "sqlglot.tokens", "qualname": "Token.var", "kind": "function", "doc": "

Returns a VAR token with var as its text.

\n", "signature": "(cls, var: str) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Tokenizer": {"fullname": "sqlglot.tokens.Tokenizer", "modulename": "sqlglot.tokens", "qualname": "Tokenizer", "kind": "class", "doc": "

\n"}, "sqlglot.tokens.Tokenizer.reset": {"fullname": "sqlglot.tokens.Tokenizer.reset", "modulename": "sqlglot.tokens", "qualname": "Tokenizer.reset", "kind": "function", "doc": "

\n", "signature": "(self) -> None:", "funcdef": "def"}, "sqlglot.tokens.Tokenizer.tokenize": {"fullname": "sqlglot.tokens.Tokenizer.tokenize", "modulename": "sqlglot.tokens", "qualname": "Tokenizer.tokenize", "kind": "function", "doc": "

Returns a list of tokens corresponding to the SQL string sql.

\n", "signature": "(self, sql: str) -> List[sqlglot.tokens.Token]:", "funcdef": "def"}, "sqlglot.transforms": {"fullname": "sqlglot.transforms", "modulename": "sqlglot.transforms", "kind": "module", "doc": "

\n"}, "sqlglot.transforms.unalias_group": {"fullname": "sqlglot.transforms.unalias_group", "modulename": "sqlglot.transforms", "qualname": "unalias_group", "kind": "function", "doc": "

Replace references to select aliases in GROUP BY clauses.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT a AS b FROM x GROUP BY b").transform(unalias_group).sql()\n'SELECT a AS b FROM x GROUP BY 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the expression that will be transformed.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.eliminate_distinct_on": {"fullname": "sqlglot.transforms.eliminate_distinct_on", "modulename": "sqlglot.transforms", "qualname": "eliminate_distinct_on", "kind": "function", "doc": "

Convert SELECT DISTINCT ON statements to a subquery with a window function.

\n\n

This is useful for dialects that don't support SELECT DISTINCT ON but support window functions.

\n\n
Arguments:
\n\n
    \n
  • expression: the expression that will be transformed.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.eliminate_qualify": {"fullname": "sqlglot.transforms.eliminate_qualify", "modulename": "sqlglot.transforms", "qualname": "eliminate_qualify", "kind": "function", "doc": "

Convert SELECT statements that contain the QUALIFY clause into subqueries, filtered equivalently.

\n\n

The idea behind this transformation can be seen in Snowflake's documentation for QUALIFY:\nhttps://docs.snowflake.com/en/sql-reference/constructs/qualify

\n\n

Some dialects don't support window functions in the WHERE clause, so we need to include them as\nprojections in the subquery, in order to refer to them in the outer filter using aliases. Also,\nif a column is referenced in the QUALIFY clause but is not selected, we need to include it too,\notherwise we won't be able to refer to it in the outer query's WHERE clause.

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.remove_precision_parameterized_types": {"fullname": "sqlglot.transforms.remove_precision_parameterized_types", "modulename": "sqlglot.transforms", "qualname": "remove_precision_parameterized_types", "kind": "function", "doc": "

Some dialects only allow the precision for parameterized types to be defined in the DDL and not in\nother expressions. This transform removes the precision from parameterized types in expressions.

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.unnest_to_explode": {"fullname": "sqlglot.transforms.unnest_to_explode", "modulename": "sqlglot.transforms", "qualname": "unnest_to_explode", "kind": "function", "doc": "

Convert cross join unnest into lateral view explode (used in presto -> hive).

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.explode_to_unnest": {"fullname": "sqlglot.transforms.explode_to_unnest", "modulename": "sqlglot.transforms", "qualname": "explode_to_unnest", "kind": "function", "doc": "

Convert explode/posexplode into unnest (used in hive -> presto).

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.remove_target_from_merge": {"fullname": "sqlglot.transforms.remove_target_from_merge", "modulename": "sqlglot.transforms", "qualname": "remove_target_from_merge", "kind": "function", "doc": "

Remove the target table's references from columns in the WHEN clauses of MERGE statements.

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.remove_within_group_for_percentiles": {"fullname": "sqlglot.transforms.remove_within_group_for_percentiles", "modulename": "sqlglot.transforms", "qualname": "remove_within_group_for_percentiles", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.add_recursive_cte_column_names": {"fullname": "sqlglot.transforms.add_recursive_cte_column_names", "modulename": "sqlglot.transforms", "qualname": "add_recursive_cte_column_names", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.epoch_cast_to_ts": {"fullname": "sqlglot.transforms.epoch_cast_to_ts", "modulename": "sqlglot.transforms", "qualname": "epoch_cast_to_ts", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.preprocess": {"fullname": "sqlglot.transforms.preprocess", "modulename": "sqlglot.transforms", "qualname": "preprocess", "kind": "function", "doc": "

Creates a new transform by chaining a sequence of transformations and converts the resulting\nexpression to SQL, using either the \"_sql\" method corresponding to the resulting expression,\nor the appropriate Generator.TRANSFORMS function (when applicable -- see below).

\n\n
Arguments:
\n\n
    \n
  • transforms: sequence of transform functions. These will be called in order.
  • \n
\n\n
Returns:
\n\n
\n

Function that can be used as a generator transform.

\n
\n", "signature": "(\ttransforms: List[Callable[[sqlglot.expressions.Expression], sqlglot.expressions.Expression]]) -> Callable[[sqlglot.generator.Generator, sqlglot.expressions.Expression], str]:", "funcdef": "def"}, "sqlglot.trie": {"fullname": "sqlglot.trie", "modulename": "sqlglot.trie", "kind": "module", "doc": "

\n"}, "sqlglot.trie.new_trie": {"fullname": "sqlglot.trie.new_trie", "modulename": "sqlglot.trie", "qualname": "new_trie", "kind": "function", "doc": "

Creates a new trie out of a collection of keywords.

\n\n

The trie is represented as a sequence of nested dictionaries keyed by either single character\nstrings, or by 0, which is used to designate that a keyword is in the trie.

\n\n
Example:
\n\n
\n
\n
>>> new_trie(["bla", "foo", "blab"])\n{'b': {'l': {'a': {0: True, 'b': {0: True}}}}, 'f': {'o': {'o': {0: True}}}}\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • keywords: the keywords to create the trie from.
  • \n
  • trie: a trie to mutate instead of creating a new one
  • \n
\n\n
Returns:
\n\n
\n

The trie corresponding to keywords.

\n
\n", "signature": "(\tkeywords: Iterable[Sequence[Hashable]],\ttrie: Optional[Dict] = None) -> Dict:", "funcdef": "def"}, "sqlglot.trie.in_trie": {"fullname": "sqlglot.trie.in_trie", "modulename": "sqlglot.trie", "qualname": "in_trie", "kind": "function", "doc": "

Checks whether a key is in a trie.

\n\n
Examples:
\n\n
\n
\n
>>> in_trie(new_trie(["cat"]), "bob")\n(0, {'c': {'a': {'t': {0: True}}}})\n
\n
\n \n
\n
>>> in_trie(new_trie(["cat"]), "ca")\n(1, {'t': {0: True}})\n
\n
\n \n
\n
>>> in_trie(new_trie(["cat"]), "cat")\n(2, {0: True})\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • trie: the trie to be searched.
  • \n
  • key: the target key.
  • \n
\n\n
Returns:
\n\n
\n

A pair (value, subtrie), where subtrie is the sub-trie we get at the point where the search stops, and value\n is either 0 (search was unsuccessful), 1 (key is a prefix of a keyword in trie) or 2 (key is in trie).

\n
\n", "signature": "(trie: Dict, key: Sequence[Hashable]) -> Tuple[int, Dict]:", "funcdef": "def"}}, "docInfo": {"sqlglot": {"qualname": 0, "fullname": 1, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 5861}, "sqlglot.pretty": {"qualname": 1, "fullname": 2, "annotation": 0, "default_value": 1, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.schema": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.parse": {"qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 124, "bases": 0, "doc": 84}, "sqlglot.parse_one": {"qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 198, "bases": 0, "doc": 99}, "sqlglot.transpile": {"qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 239, "bases": 0, "doc": 177}, "sqlglot.dataframe": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3543}, "sqlglot.dataframe.sql": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession.table": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 208, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 259, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.select": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.alias": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.where": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 86, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.filter": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 86, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.agg": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.join": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 180, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 120, "bases": 0, "doc": 44}, 
"sqlglot.dataframe.sql.DataFrame.sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 120, "bases": 0, "doc": 44}, "sqlglot.dataframe.sql.DataFrame.union": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.intersect": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.distinct": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 38, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.dropna": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 138, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.fillna": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 123, "bases": 0, "doc": 100}, "sqlglot.dataframe.sql.DataFrame.replace": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 217, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.drop": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 80, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.limit": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.hint": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.repartition": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 111, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.cache": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.persist": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 60, "bases": 0, "doc": 20}, "sqlglot.dataframe.sql.GroupedData": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.agg": 
{"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.count": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.mean": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.avg": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.max": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.min": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.sum": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.pivot": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 63, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ensure_col": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 71, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ensure_cols": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 98, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 123, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 92, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.binary_op": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 85, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 85, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.unary_op": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ensure_literal": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.set_table_name": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.alias": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.asc": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.desc": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, 
"bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.when": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.otherwise": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.isNull": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.isNotNull": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.cast": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 53, "bases": 0, "doc": 27}, "sqlglot.dataframe.sql.Column.startswith": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 78, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.endswith": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 78, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.rlike": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.like": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ilike": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.substr": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 121, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.isin": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 81, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.between": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 97, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.over": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 138, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 143, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 177, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window": {"qualname": 1, 
"fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.partitionBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.orderBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.rowsBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.rangeBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 38, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameReader": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameReader.table": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 122, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 52, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 70, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 71, "bases": 0, "doc": 3}, "sqlglot.dialects": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 0, "doc": 764}, "sqlglot.dialects.bigquery": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"qualname": 4, "fullname": 7, "annotation": 0, 
"default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.databricks": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.databricks.Databricks": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.databricks.Databricks.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 174}, "sqlglot.dialects.databricks.Databricks.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 692}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.dialect": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 5}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 8, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.HIVE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SPARK": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.dialects.dialect.Dialects.TRINO": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TSQL": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.DRILL": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 105, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.format_time": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 70, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.parse": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.parse_into": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 126, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.generate": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 50, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.transpile": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 37, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.tokenize": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.rename_func": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.if_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.inline_array_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_ilike_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, 
"sqlglot.dialects.dialect.no_recursive_cte_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_tablesample_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_pivot_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_trycast_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_properties_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.str_position_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.struct_extract_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.var_map_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 91, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.format_time_lambda": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 71}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 46}, "sqlglot.dialects.dialect.parse_date_delta": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 78, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.date_trunc_to_time": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.locate_to_strposition": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.left_to_substring_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.right_to_substring_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.timestrtotime_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.datestrtodate_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.min_or_least": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, 
"signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.max_or_greatest": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.count_if_to_sum": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.trim_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.str_to_time_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"qualname": 6, "fullname": 9, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.pivot_column_names": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 111, "bases": 0, "doc": 3}, "sqlglot.dialects.drill": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.drill.Drill": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.drill.Drill.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.drill.Drill.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.drill.Drill.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.dialects.duckdb": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 81, "bases": 0, "doc": 3}, "sqlglot.dialects.hive": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.hive.Hive.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.hive.Hive.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.hive.Hive.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"qualname": 4, "fullname": 7, "annotation": 0, 
"default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.dialects.mysql": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.mysql.MySQL": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.mysql.MySQL.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.mysql.MySQL.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.oracle.Oracle.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.postgres": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.postgres.Postgres": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.postgres.Postgres.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.postgres.Postgres.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.presto": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.dialects.presto.Presto": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.presto.Presto.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.presto.Presto.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.presto.Presto.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.redshift": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.redshift.Redshift": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.redshift.Redshift.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 174}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.redshift.Redshift.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 692}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 58}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 19}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 75}, "sqlglot.dialects.snowflake": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"qualname": 4, "fullname": 7, 
"annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 36, "bases": 0, "doc": 3}, "sqlglot.dialects.spark": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.spark.Spark": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.spark.Spark.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 174}, "sqlglot.dialects.spark.Spark.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 692}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.spark2": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.spark2.Spark2": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.spark2.Spark2.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 174}, "sqlglot.dialects.spark2.Spark2.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 692}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.sqlite": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.sqlite.SQLite.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"qualname": 4, "fullname": 7, 
"annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.starrocks": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.starrocks.StarRocks": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 174}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 692}, "sqlglot.dialects.tableau": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.tableau.Tableau": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.tableau.Tableau.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.tableau.Tableau.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.teradata": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.teradata.Teradata.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.trino": 
{"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.trino.Trino": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.trino.Trino.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 692}, "sqlglot.dialects.trino.Trino.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.tsql": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"qualname": 6, "fullname": 9, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.TSQL": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.tsql.TSQL.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.diff": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 6444}, "sqlglot.diff.Insert": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.diff.Insert.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.diff.Remove": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.diff.Remove.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.diff.Move": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 14}, "sqlglot.diff.Move.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.diff.Update": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.diff.Update.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.diff.Keep": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.diff.Keep.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.diff.diff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 206, "bases": 0, "doc": 306}, "sqlglot.diff.ChangeDistiller": 
{"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 46}, "sqlglot.diff.ChangeDistiller.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.diff.ChangeDistiller.diff": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 199, "bases": 0, "doc": 3}, "sqlglot.errors": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.errors.ErrorLevel": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.errors.ErrorLevel.IGNORE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 6}, "sqlglot.errors.ErrorLevel.WARN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 6}, "sqlglot.errors.ErrorLevel.RAISE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 12}, "sqlglot.errors.SqlglotError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 11}, "sqlglot.errors.UnsupportedError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.ParseError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.ParseError.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.errors.ParseError.new": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 207, "bases": 0, "doc": 3}, "sqlglot.errors.TokenError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.OptimizeError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.SchemaError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.ExecuteError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.concat_messages": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 3}, "sqlglot.errors.merge_errors": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.executor": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 2950}, "sqlglot.executor.execute": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 197, "bases": 0, "doc": 115}, "sqlglot.executor.context": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 64}, "sqlglot.executor.context.Context.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 21}, "sqlglot.executor.context.Context.eval": {"qualname": 2, "fullname": 5, 
"annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.eval_tuple": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.add_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.table_iter": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 72, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.filter": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.set_row": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.set_index": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.set_range": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.executor.env": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.env.reverse_key": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.env.reverse_key.__init__": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 9, "bases": 0, "doc": 3}, "sqlglot.executor.env.filter_nulls": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.executor.env.null_if_any": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 13, "bases": 0, "doc": 59}, "sqlglot.executor.env.str_position": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 3}, "sqlglot.executor.env.substring": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.executor.env.cast": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.env.ordered": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.executor.env.interval": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.execute": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.generate": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 16}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"qualname": 
3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 15}, "sqlglot.executor.python.PythonExecutor.context": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.table": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.scan": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.static": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.scan_table": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.join": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.hash_join": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 28, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.aggregate": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.set_operation": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.Python": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.executor.python.Python.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.executor.python.Python.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 692}, "sqlglot.executor.table": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.add_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.append": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.pop": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.executor.table.TableIter": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.TableIter.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, 
"default_value": 0, "signature": 9, "bases": 0, "doc": 3}, "sqlglot.executor.table.RangeReader": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.RangeReader.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 9, "bases": 0, "doc": 3}, "sqlglot.executor.table.RowReader": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.RowReader.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 3}, "sqlglot.executor.table.Tables": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 6, "doc": 87}, "sqlglot.executor.table.ensure_tables": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.expressions": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 61}, "sqlglot.expressions.Expression": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 346}, "sqlglot.expressions.Expression.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.expressions.Expression.this": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.expression": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.expressions": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.text": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 32}, "sqlglot.expressions.Expression.is_string": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.is_number": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.is_int": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.is_star": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.expressions.Expression.alias": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 18}, "sqlglot.expressions.Expression.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Expression.copy": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 10}, "sqlglot.expressions.Expression.add_comments": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.expressions.Expression.append": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 54}, "sqlglot.expressions.Expression.set": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 45}, "sqlglot.expressions.Expression.depth": {"qualname": 2, "fullname": 4, 
"annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.iter_expressions": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 14}, "sqlglot.expressions.Expression.find": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 60, "bases": 0, "doc": 83}, "sqlglot.expressions.Expression.find_all": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 60, "bases": 0, "doc": 81}, "sqlglot.expressions.Expression.find_ancestor": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 43}, "sqlglot.expressions.Expression.parent_select": {"qualname": 3, "fullname": 5, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 8}, "sqlglot.expressions.Expression.same_parent": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 13}, "sqlglot.expressions.Expression.root": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 10}, "sqlglot.expressions.Expression.walk": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 89}, "sqlglot.expressions.Expression.dfs": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 33}, "sqlglot.expressions.Expression.bfs": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 33}, "sqlglot.expressions.Expression.unnest": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.unalias": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 12}, "sqlglot.expressions.Expression.unnest_operands": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.flatten": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 28}, "sqlglot.expressions.Expression.sql": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 99, "bases": 0, "doc": 61}, "sqlglot.expressions.Expression.transform": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 111}, "sqlglot.expressions.Expression.replace": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 81}, "sqlglot.expressions.Expression.pop": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 22}, "sqlglot.expressions.Expression.assert_is": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 162}, "sqlglot.expressions.Expression.error_messages": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 79}, "sqlglot.expressions.Expression.dump": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.load": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 17}, "sqlglot.expressions.Condition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, 
"sqlglot.expressions.Condition.and_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 168, "bases": 0, "doc": 183}, "sqlglot.expressions.Condition.or_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 168, "bases": 0, "doc": 183}, "sqlglot.expressions.Condition.not_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 28, "bases": 0, "doc": 108}, "sqlglot.expressions.Condition.as_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 178, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.isin": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 110, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.between": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.is_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.like": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.ilike": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.eq": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.neq": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.rlike": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.expressions.Predicate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 13}, "sqlglot.expressions.DerivedTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unionable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unionable.union": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 201}, "sqlglot.expressions.Unionable.intersect": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 201}, "sqlglot.expressions.Unionable.except_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 202}, "sqlglot.expressions.UDTF": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Cache": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Uncache": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Create": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Clone": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Describe": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Pragma": {"qualname": 1, "fullname": 3, "annotation": 0, 
"default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Set": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SetItem": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Show": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UserDefinedFunction": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CharacterSet": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.With": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WithinGroup": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CTE": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TableAlias": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.HexString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ByteString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RawString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Column": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Column.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Column.parts": {"qualname": 2, "fullname": 4, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 15}, "sqlglot.expressions.Column.to_dot": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 10}, "sqlglot.expressions.ColumnPosition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ColumnDef": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AlterColumn": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RenameTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SetTag": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Comment": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MergeTreeTTLAction": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MergeTreeTTL": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ColumnConstraintKind": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CharacterSetColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CheckColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CollateColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CommentColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CompressColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateFormatColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DefaultColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.EncodeColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.InlineLengthColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NotNullColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.OnUpdateColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TitleColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UniqueColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UppercaseColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PathColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Constraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Delete": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, 
"bases": 1, "doc": 3}, "sqlglot.expressions.Delete.delete": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 150}, "sqlglot.expressions.Delete.where": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 226}, "sqlglot.expressions.Delete.returning": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 194}, "sqlglot.expressions.Drop": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Filter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Check": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Directory": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ForeignKey": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PrimaryKey": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Into": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.From": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Having": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Hint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JoinHint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Identifier": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Identifier.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Index": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Insert": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Insert.with_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 236, "bases": 0, "doc": 291}, "sqlglot.expressions.OnConflict": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Returning": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Introducer": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.National": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LoadData": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Partition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, 
"sqlglot.expressions.Fetch": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Group": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Lambda": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Limit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Literal": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Literal.number": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.expressions.Literal.string": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.expressions.Literal.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Join": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Join.on": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 239}, "sqlglot.expressions.Join.using": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 241}, "sqlglot.expressions.Lateral": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MatchRecognize": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Final": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Offset": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Order": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Cluster": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Distribute": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Ordered": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Property": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AlgorithmProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AutoIncrementProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BlockCompressionProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CharacterSetProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, 
"sqlglot.expressions.ChecksumProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CollateProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataBlocksizeProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DefinerProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DistKeyProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DistStyleProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.EngineProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ExecuteAsProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ExternalProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FallbackProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FileFormatProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FreespaceProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.InputOutputFormat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.IsolatedLoadingProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JournalProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LanguageProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DictProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DictSubProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DictRange": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LikeProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LocationProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LockingProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LogProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MaterializedProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, 
"sqlglot.expressions.MergeBlockRatioProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NoPrimaryIndexProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.OnCommitProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PartitionedByProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ReturnsProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowFormatProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowFormatDelimitedProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowFormatSerdeProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SchemaCommentProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SerdeProperties": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SetProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SettingsProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SortKeyProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SqlSecurityProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StabilityProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TemporaryProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TransientProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.VolatileProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WithDataProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WithJournalTableProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Properties": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Properties.Location": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_NAME": {"qualname": 4, "fullname": 6, "annotation": 0, 
"default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_WITH": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.from_dict": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 3}, "sqlglot.expressions.Qualify": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Return": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Reference": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Tuple": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Tuple.isin": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 110, "bases": 0, "doc": 3}, "sqlglot.expressions.Subqueryable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Subqueryable.subquery": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 89, "bases": 0, "doc": 213}, "sqlglot.expressions.Subqueryable.limit": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 166, "bases": 0, "doc": 3}, "sqlglot.expressions.Subqueryable.with_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 236, "bases": 0, "doc": 301}, "sqlglot.expressions.Table": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Table.parts": {"qualname": 2, "fullname": 4, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 14}, "sqlglot.expressions.SystemTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Union": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Union.limit": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 166, "bases": 0, "doc": 221}, "sqlglot.expressions.Union.select": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 245}, "sqlglot.expressions.Union.is_star": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.expressions.Except": {"qualname": 1, "fullname": 3, "annotation": 
0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Intersect": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unnest": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Update": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Values": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Var": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Schema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Lock": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Select": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Select.from_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 199}, "sqlglot.expressions.Select.group_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 275}, "sqlglot.expressions.Select.order_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 250}, "sqlglot.expressions.Select.sort_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 264}, "sqlglot.expressions.Select.cluster_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 264}, "sqlglot.expressions.Select.limit": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 166, "bases": 0, "doc": 219}, "sqlglot.expressions.Select.offset": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 166, "bases": 0, "doc": 219}, "sqlglot.expressions.Select.select": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 196}, "sqlglot.expressions.Select.lateral": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 235}, "sqlglot.expressions.Select.join": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 372, "bases": 0, "doc": 621}, "sqlglot.expressions.Select.where": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 253}, "sqlglot.expressions.Select.having": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 275}, "sqlglot.expressions.Select.window": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 3}, "sqlglot.expressions.Select.qualify": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 3}, "sqlglot.expressions.Select.distinct": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 102, "bases": 0, "doc": 157}, "sqlglot.expressions.Select.ctas": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 184, "bases": 0, "doc": 223}, 
"sqlglot.expressions.Select.lock": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 331}, "sqlglot.expressions.Select.is_star": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.expressions.Subquery": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Subquery.unnest": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 8}, "sqlglot.expressions.Subquery.is_star": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.expressions.Subquery.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.TableSample": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Tag": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 15}, "sqlglot.expressions.Pivot": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Window": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WindowSpec": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Where": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Star": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Star.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Parameter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SessionParameter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Placeholder": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Null": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Boolean": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataTypeSize": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataType.Type": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.expressions.DataType.Type.ARRAY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BIGINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, 
"bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BINARY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BIT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.CHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATETIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATETIME64": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TSRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATERANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DECIMAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DOUBLE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.FLOAT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 
3}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.HSTORE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.IMAGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT128": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT256": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INTERVAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.JSON": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.JSONB": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MAP": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MONEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NCHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NULL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NULLABLE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.OBJECT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"qualname": 3, 
"fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SERIAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SMALLINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.STRUCT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SUPER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TEXT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TINYINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UBIGINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.USMALLINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UTINYINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UINT128": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UINT256": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UUID": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.VARBINARY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.VARCHAR": {"qualname": 3, "fullname": 5, "annotation": 0, 
"default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.VARIANT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.XML": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.build": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 157, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.is_type": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 65, "bases": 0, "doc": 3}, "sqlglot.expressions.PseudoType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SubqueryPredicate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.All": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Any": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Exists": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Command": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Transaction": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Commit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Rollback": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AlterTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AddConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DropPartition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Binary": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Add": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Connector": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.And": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Or": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseAnd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseLeftShift": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseOr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseRightShift": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseXor": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Div": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Overlaps": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Dot": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Dot.build": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 52, "bases": 0, "doc": 12}, "sqlglot.expressions.DPipe": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.EQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.NullSafeEQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.NullSafeNEQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Distance": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Escape": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Glob": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.GT": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.GTE": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.ILike": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.ILikeAny": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.IntDiv": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Is": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Kwarg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 12}, "sqlglot.expressions.Like": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.LikeAny": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.LT": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.LTE": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Mod": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Mul": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NEQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.SimilarTo": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Slice": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayOverlaps": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unary": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseNot": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Not": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Paren": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Neg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Alias": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Alias.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Aliases": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AtTimeZone": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Between": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Bracket": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Distinct": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.In": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeUnit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 10}, "sqlglot.expressions.TimeUnit.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.Interval": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.IgnoreNulls": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RespectNulls": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Func": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 128}, "sqlglot.expressions.Func.from_arg_list": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.expressions.Func.sql_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, 
"sqlglot.expressions.Func.sql_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.Func.default_parser_mappings": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.AggFunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ParameterizedAgg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Abs": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Anonymous": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Hll": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ApproxDistinct": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Array": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ToChar": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.GenerateSeries": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayAgg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayAll": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayAny": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayConcat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayContains": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.ArrayContained": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayFilter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayJoin": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArraySize": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArraySort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArraySum": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayUnionAgg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Avg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AnyValue": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Case": 
{"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Case.when": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 117, "bases": 0, "doc": 3}, "sqlglot.expressions.Case.else_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 84, "bases": 0, "doc": 3}, "sqlglot.expressions.Cast": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Cast.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Cast.is_type": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 65, "bases": 0, "doc": 3}, "sqlglot.expressions.CastToStrType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Collate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TryCast": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Ceil": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Coalesce": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Concat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ConcatWs": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Count": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CountIf": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentDatetime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentTimestamp": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentUser": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DatetimeAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DatetimeSub": {"qualname": 1, "fullname": 3, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DatetimeDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DatetimeTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DayOfWeek": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DayOfMonth": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DayOfYear": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WeekOfYear": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LastDateOfMonth": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Extract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimestampAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimestampSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimestampDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimestampTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateFromParts": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateStrToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateToDateStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateToDi": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Day": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Decode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DiToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Encode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Exp": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Explode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Floor": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FromBase64": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ToBase64": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Greatest": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.GroupConcat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Hex": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.If": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.IfNull": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Initcap": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONKeyValue": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONObject": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.OpenJSONColumnDef": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.OpenJSON": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONBContains": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.JSONExtractScalar": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONBExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONBExtractScalar": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONFormat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Least": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Left": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Right": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Length": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Levenshtein": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, 
"sqlglot.expressions.Ln": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Log": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Log2": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Log10": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LogicalOr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LogicalAnd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Lower": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Map": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StarMap": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.VarMap": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MatchAgainst": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Max": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MD5": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Min": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Month": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Nvl2": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Posexplode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Pow": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.PercentileCont": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PercentileDisc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Quantile": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ApproxQuantile": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RangeN": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ReadCSV": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Reduce": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpLike": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpILike": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpSplit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Repeat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Round": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowNumber": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SafeDivide": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SetAgg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SHA": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SHA2": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SortArray": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Split": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Substring": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StandardHash": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrPosition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrToTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrToUnix": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NumberToStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Struct": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StructExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sum": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sqrt": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Stddev": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StddevPop": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StddevSamp": 
{"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeToStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeToTimeStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeToUnix": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeStrToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeStrToTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeStrToUnix": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Trim": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TsOrDsAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TsOrDsToDateStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TsOrDsToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TsOrDiToDi": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unhex": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UnixToStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UnixToTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UnixToTimeStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Upper": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Variance": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.VariancePop": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Week": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.XMLTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Year": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Use": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Merge": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.When": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NextValueFor": {"qualname": 1, "fullname": 3, "annotation": 0, 
"default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.maybe_parse": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 270, "bases": 0, "doc": 231}, "sqlglot.expressions.union": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 187, "bases": 0, "doc": 216}, "sqlglot.expressions.intersect": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 187, "bases": 0, "doc": 216}, "sqlglot.expressions.except_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 187, "bases": 0, "doc": 217}, "sqlglot.expressions.select": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 138, "bases": 0, "doc": 206}, "sqlglot.expressions.from_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 136, "bases": 0, "doc": 205}, "sqlglot.expressions.update": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 234, "bases": 0, "doc": 263}, "sqlglot.expressions.delete": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 228, "bases": 0, "doc": 164}, "sqlglot.expressions.insert": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 259, "bases": 0, "doc": 197}, "sqlglot.expressions.condition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 154, "bases": 0, "doc": 347}, "sqlglot.expressions.and_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 162, "bases": 0, "doc": 192}, "sqlglot.expressions.or_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 162, "bases": 0, "doc": 192}, "sqlglot.expressions.not_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 154, "bases": 0, "doc": 159}, "sqlglot.expressions.paren": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 70, "bases": 0, "doc": 122}, "sqlglot.expressions.to_identifier": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 69}, "sqlglot.expressions.to_interval": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 16}, "sqlglot.expressions.to_table": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 149, "bases": 0, "doc": 101}, "sqlglot.expressions.to_column": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 62}, "sqlglot.expressions.alias_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 242, "bases": 0, "doc": 305}, "sqlglot.expressions.subquery": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 182, "bases": 0, "doc": 188}, "sqlglot.expressions.column": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 208, "bases": 0, "doc": 74}, "sqlglot.expressions.cast": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 108, "bases": 0, "doc": 123}, "sqlglot.expressions.table_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 208, "bases": 0, "doc": 75}, "sqlglot.expressions.values": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 131, "bases": 0, "doc": 143}, "sqlglot.expressions.var": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, 
"signature": 58, "bases": 0, "doc": 168}, "sqlglot.expressions.rename_table": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 75, "bases": 0, "doc": 50}, "sqlglot.expressions.convert": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 74}, "sqlglot.expressions.replace_children": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 57, "bases": 0, "doc": 18}, "sqlglot.expressions.column_table_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 127}, "sqlglot.expressions.table_name": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 147}, "sqlglot.expressions.replace_tables": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 170}, "sqlglot.expressions.replace_placeholders": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 56, "bases": 0, "doc": 246}, "sqlglot.expressions.expand": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 91, "bases": 0, "doc": 338}, "sqlglot.expressions.func": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 122, "bases": 0, "doc": 272}, "sqlglot.expressions.true": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 8}, "sqlglot.expressions.false": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 8}, "sqlglot.expressions.null": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 7}, "sqlglot.generator": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.generator.Generator": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 692}, "sqlglot.generator.Generator.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 411, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.generate": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 76, "bases": 0, "doc": 55}, "sqlglot.generator.Generator.unsupported": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sep": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.seg": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.pad_comment": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.maybe_comment": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 88, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.wrap": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.no_identify": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.normalize_func": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.indent": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sql": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 96, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.uncache_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cache_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.characterset_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.column_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.columnposition_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.columndef_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.columnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 36, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.create_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.clone_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.describe_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.prepend_ctes": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.with_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cte_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tablealias_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitstring_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, 
"signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.hexstring_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bytestring_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.rawstring_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.datatypesize_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.datatype_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.directory_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.delete_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.drop_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.except_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.except_op": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.fetch_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.filter_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.hint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.index_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.identifier_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.inputoutputformat_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.national_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.partition_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.properties_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.root_properties": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.properties": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 117, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.with_properties": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.locate_properties": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.property_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.likeproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.fallbackproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.journalproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.freespaceproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.checksumproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lockingproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.withdataproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.insert_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.intersect_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.intersect_op": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.introducer_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.pseudotype_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.onconflict_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.returning_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.table_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tablesample_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.pivot_sql": {"qualname": 3, "fullname": 5, 
"annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tuple_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.update_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.values_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.var_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.into_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.from_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.group_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.having_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.join_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lambda_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 59, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lateral_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.limit_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.offset_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.setitem_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.set_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.pragma_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lock_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.literal_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.loaddata_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.null_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.boolean_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.order_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cluster_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.distribute_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sort_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.ordered_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.matchrecognize_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.query_modifiers": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.after_having_modifiers": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.after_limit_modifiers": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.select_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.schema_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.star_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.parameter_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sessionparameter_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.placeholder_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.subquery_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.qualify_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.union_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.union_op": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.unnest_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.where_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.window_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.partition_by_sql": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 52, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.windowspec_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.withingroup_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.between_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bracket_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.all_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.any_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.exists_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.case_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.constraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.nextvaluefor_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.extract_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.trim_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.concat_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.check_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.foreignkey_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.primarykey_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.if_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.matchagainst_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.jsonobject_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.openjson_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.in_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.in_unnest_op": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.interval_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.return_sql": {"qualname": 3, "fullname": 
5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.reference_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.anonymous_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.paren_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.neg_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.not_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.alias_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.aliases_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.attimezone_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.add_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.and_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.connector_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiseand_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwisenot_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiseor_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwisexor_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cast_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.currentdate_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.collate_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.command_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.comment_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, 
"signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mergetreettl_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.transaction_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.commit_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.rollback_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.altercolumn_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.renametable_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.altertable_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.droppartition_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.addconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.distinct_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.ignorenulls_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.respectnulls_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.intdiv_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dpipe_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.div_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.overlaps_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.distance_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dot_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.eq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.escape_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.glob_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.gt_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.gte_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.ilike_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.ilikeany_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.is_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.like_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.likeany_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.similarto_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lt_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lte_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mod_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mul_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.neq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.nullsafeeq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.nullsafeneq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.or_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.slice_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sub_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.trycast_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.use_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.binary": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.function_fallback_sql": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.func": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.format_args": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.text_width": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.format_time": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, 
"signature": 40, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.expressions": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 179, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.op_expressions": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 65, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.naked_property": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.set_operation": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tag_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.token_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.joinhint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.kwarg_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.when_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.merge_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tochar_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dictproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dictrange_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dictsubproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.cached_generator": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 65, "bases": 0, "doc": 7}, "sqlglot.helper": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.helper.AutoName": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 25}, "sqlglot.helper.seq_get": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 27}, "sqlglot.helper.ensure_list": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 66}, "sqlglot.helper.ensure_collection": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 66}, "sqlglot.helper.csv": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 55}, "sqlglot.helper.subclasses": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 103, "bases": 0, "doc": 84}, "sqlglot.helper.apply_index_offset": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 76, "bases": 
0, "doc": 98}, "sqlglot.helper.camel_to_snake_case": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 16}, "sqlglot.helper.while_changing": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 58}, "sqlglot.helper.tsort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 47, "bases": 0, "doc": 53}, "sqlglot.helper.open_file": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 19}, "sqlglot.helper.csv_reader": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 53}, "sqlglot.helper.find_new_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 50}, "sqlglot.helper.name_sequence": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 20}, "sqlglot.helper.object_to_dict": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 12}, "sqlglot.helper.split_num_words": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 312}, "sqlglot.helper.is_iterable": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 132}, "sqlglot.helper.flatten": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 195}, "sqlglot.helper.dict_depth": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 194}, "sqlglot.helper.first": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 16}, "sqlglot.helper.case_sensitive": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 89, "bases": 0, "doc": 14}, "sqlglot.helper.should_identify": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 113, "bases": 0, "doc": 102}, "sqlglot.lineage": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.lineage.Node": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.lineage.Node.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 120, "bases": 0, "doc": 3}, "sqlglot.lineage.Node.walk": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 3}, "sqlglot.lineage.Node.to_html": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.lineage.lineage": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 253, "bases": 0, "doc": 106}, "sqlglot.lineage.LineageHTML": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 18}, "sqlglot.lineage.LineageHTML.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 132, "bases": 0, "doc": 3}, "sqlglot.optimizer": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types.annotate_types": {"qualname": 2, 
"fullname": 6, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 331}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.canonicalize": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 46}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.coerce_type": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_ctes": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 166}, "sqlglot.optimizer.eliminate_joins": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 195}, "sqlglot.optimizer.eliminate_joins.join_condition": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 46}, "sqlglot.optimizer.eliminate_subqueries": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 278}, "sqlglot.optimizer.isolate_table_selects": {"qualname": 0, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"qualname": 3, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.merge_subqueries": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 276}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 23, 
"bases": 0, "doc": 3}, "sqlglot.optimizer.normalize": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.normalize.normalize": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 64, "bases": 0, "doc": 188}, "sqlglot.optimizer.normalize.normalized": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.normalize.normalization_distance": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 171}, "sqlglot.optimizer.normalize.distributive_law": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 39}, "sqlglot.optimizer.normalize_identifiers": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 100, "bases": 0, "doc": 187}, "sqlglot.optimizer.optimize_joins": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 132}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 13}, "sqlglot.optimizer.optimize_joins.normalize": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 13}, "sqlglot.optimizer.optimize_joins.other_table_names": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.optimizer.optimizer": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.optimizer.optimize": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 580, "bases": 0, "doc": 221}, "sqlglot.optimizer.pushdown_predicates": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 198}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 20}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 31}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_projections": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"qualname": 2, "fullname": 6, 
"annotation": 0, "default_value": 0, "signature": 7, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 33, "bases": 0, "doc": 201}, "sqlglot.optimizer.qualify": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.qualify.qualify": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 350, "bases": 0, "doc": 392}, "sqlglot.optimizer.qualify_columns": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 112, "bases": 0, "doc": 232}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 13}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 118, "bases": 0, "doc": 14}, "sqlglot.optimizer.qualify_columns.Resolver": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 27}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 32, "bases": 0, "doc": 3}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 50}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"qualname": 4, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 13}, "sqlglot.optimizer.qualify_tables": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 313}, "sqlglot.optimizer.scope": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 5}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 8, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.CTE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.UNION": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope": {"qualname": 1, "fullname": 4, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 328}, "sqlglot.optimizer.scope.Scope.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 84, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope.clear_cache": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope.branch": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 12}, "sqlglot.optimizer.scope.Scope.walk": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope.find": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 97}, "sqlglot.optimizer.scope.Scope.find_all": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 90}, "sqlglot.optimizer.scope.Scope.replace": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 63}, "sqlglot.optimizer.scope.Scope.tables": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 21}, "sqlglot.optimizer.scope.Scope.ctes": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 21}, "sqlglot.optimizer.scope.Scope.derived_tables": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 43}, "sqlglot.optimizer.scope.Scope.udtfs": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 24}, "sqlglot.optimizer.scope.Scope.subqueries": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 44}, "sqlglot.optimizer.scope.Scope.columns": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 36}, "sqlglot.optimizer.scope.Scope.selected_sources": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 69}, "sqlglot.optimizer.scope.Scope.cte_sources": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 24}, "sqlglot.optimizer.scope.Scope.selects": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 52}, "sqlglot.optimizer.scope.Scope.external_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 35}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 22}, "sqlglot.optimizer.scope.Scope.join_hints": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 30}, "sqlglot.optimizer.scope.Scope.source_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 17, "bases": 0, "doc": 52}, "sqlglot.optimizer.scope.Scope.is_subquery": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.optimizer.scope.Scope.is_union": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 
0, "doc": 9}, "sqlglot.optimizer.scope.Scope.is_cte": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.optimizer.scope.Scope.is_root": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.optimizer.scope.Scope.is_udtf": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 14}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.optimizer.scope.Scope.rename_source": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 8}, "sqlglot.optimizer.scope.Scope.add_source": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 8}, "sqlglot.optimizer.scope.Scope.remove_source": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 8}, "sqlglot.optimizer.scope.Scope.traverse": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 28}, "sqlglot.optimizer.scope.Scope.ref_count": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 34}, "sqlglot.optimizer.scope.traverse_scope": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 330}, "sqlglot.optimizer.scope.build_scope": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 41}, "sqlglot.optimizer.scope.walk_in_scope": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 83}, "sqlglot.optimizer.simplify": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.simplify": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 138}, "sqlglot.optimizer.simplify.rewrite_between": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 31}, "sqlglot.optimizer.simplify.simplify_not": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 25}, "sqlglot.optimizer.simplify.flatten": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 24}, "sqlglot.optimizer.simplify.simplify_connectors": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.remove_compliments": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 19}, "sqlglot.optimizer.simplify.uniq_sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 23}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 58}, "sqlglot.optimizer.simplify.simplify_literals": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.simplify_parens": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, 
"sqlglot.optimizer.simplify.remove_where_true": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.always_true": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.is_complement": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.is_false": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.is_null": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.eval_boolean": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.extract_date": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.extract_interval": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.date_literal": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.boolean_literal": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.unnest_subqueries": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 227}, "sqlglot.optimizer.unnest_subqueries.unnest": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 3}, "sqlglot.parser": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.parser.parse_var_map": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 3}, "sqlglot.parser.parse_like": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.parser.binary_range_parser": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 93, "bases": 0, "doc": 3}, "sqlglot.parser.Parser": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 174}, "sqlglot.parser.Parser.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 162, "bases": 0, "doc": 3}, "sqlglot.parser.Parser.reset": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.parser.Parser.parse": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 87, "bases": 0, "doc": 70}, "sqlglot.parser.Parser.parse_into": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 159, "bases": 0, "doc": 111}, "sqlglot.parser.Parser.check_errors": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 16}, 
"sqlglot.parser.Parser.raise_error": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 57, "bases": 0, "doc": 22}, "sqlglot.parser.Parser.expression": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 74}, "sqlglot.parser.Parser.validate_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 60, "bases": 0, "doc": 57}, "sqlglot.planner": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.planner.Plan": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.planner.Plan.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.planner.Step": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.planner.Step.from_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 209}, "sqlglot.planner.Step.add_dependency": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.planner.Step.to_s": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.planner.Scan": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.Scan.from_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 209}, "sqlglot.planner.Join": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.Join.from_joins": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 96, "bases": 0, "doc": 3}, "sqlglot.planner.Aggregate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.Sort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.SetOperation": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.SetOperation.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 83, "bases": 0, "doc": 3}, "sqlglot.planner.SetOperation.from_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 209}, "sqlglot.schema.Schema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 8}, "sqlglot.schema.Schema.add_table": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 187, "bases": 0, "doc": 83}, "sqlglot.schema.Schema.column_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 143, "bases": 0, "doc": 79}, "sqlglot.schema.Schema.get_column_type": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 149, "bases": 0, "doc": 79}, "sqlglot.schema.Schema.supported_table_args": {"qualname": 4, "fullname": 6, "annotation": 3, "default_value": 0, "signature": 0, "bases": 0, "doc": 16}, "sqlglot.schema.Schema.empty": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, 
"sqlglot.schema.AbstractMappingSchema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 87}, "sqlglot.schema.AbstractMappingSchema.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 3}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.schema.AbstractMappingSchema.find": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 88, "bases": 0, "doc": 3}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 75, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 6, "doc": 155}, "sqlglot.schema.MappingSchema.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 146, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 47, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema.copy": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema.add_table": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 187, "bases": 0, "doc": 83}, "sqlglot.schema.MappingSchema.column_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 143, "bases": 0, "doc": 79}, "sqlglot.schema.MappingSchema.get_column_type": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 149, "bases": 0, "doc": 79}, "sqlglot.schema.ensure_schema": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 71, "bases": 0, "doc": 3}, "sqlglot.schema.ensure_column_mapping": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 70, "bases": 0, "doc": 3}, "sqlglot.schema.flatten_schema": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 69, "bases": 0, "doc": 3}, "sqlglot.schema.nested_get": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 71, "bases": 0, "doc": 86}, "sqlglot.schema.nested_set": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 276}, "sqlglot.serde": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.serde.dump": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 149, "bases": 0, "doc": 12}, "sqlglot.serde.load": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 149, "bases": 0, "doc": 16}, "sqlglot.time": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.time.format_time": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 108}, "sqlglot.tokens": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.tokens.TokenType.L_PAREN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 
11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.R_PAREN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.L_BRACKET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.R_BRACKET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.L_BRACE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.R_BRACE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PLUS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DCOLON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SEMICOLON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.STAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BACKSLASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SLASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EQ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NEQ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AND": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, 
"bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AMP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DPIPE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PIPE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CARET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TILDA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ARROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DARROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FARROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HASH_ARROW": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LR_ARROW": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LT_AT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AT_GT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DOLLAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PARAMETER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DAMP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BLOCK_START": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BLOCK_END": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SPACE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BREAK": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.STRING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NUMBER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IDENTIFIER": {"qualname": 2, "fullname": 4, "annotation": 0, 
"default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATABASE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLUMN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SCHEMA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TABLE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIT_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HEX_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BYTE_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RAW_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BOOLEAN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TINYINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UTINYINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SMALLINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.USMALLINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIGINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UBIGINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT128": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UINT128": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT256": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UINT256": {"qualname": 
2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FLOAT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DOUBLE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DECIMAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NCHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VARCHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NVARCHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LONGTEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LONGBLOB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BINARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VARBINARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JSON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JSONB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIME": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIMESTAMP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATETIME": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATETIME64": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.INT4RANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT8RANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NUMRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TSRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TSTZRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATERANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UUID": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NULLABLE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GEOMETRY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HLLSKETCH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HSTORE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SUPER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SERIAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIGSERIAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.XML": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MONEY": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SMALLMONEY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROWVERSION": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IMAGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VARIANT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OBJECT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALIAS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALTER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALWAYS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ANTI": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ANY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.APPLY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ARRAY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ASC": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ASOF": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BEGIN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BETWEEN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CACHE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CASE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLLATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMAND": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMENT": {"qualname": 2, "fullname": 
4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMIT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CONSTRAINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CREATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CROSS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CUBE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_USER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DEFAULT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DELETE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DESC": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DESCRIBE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DICTIONARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DISTINCT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DIV": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DROP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ELSE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.END": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ESCAPE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EXCEPT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EXECUTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EXISTS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.FALSE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FETCH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FILTER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FINAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FIRST": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FOR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FORMAT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FROM": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FUNCTION": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GLOB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GLOBAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GROUP_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HAVING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IF": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ILIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INDEX": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INNER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INSERT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INTERSECT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.INTERVAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INTO": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INTRODUCER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IRLIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ISNULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JOIN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.KEEP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LANGUAGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LATERAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LEFT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LIKE_ANY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LIMIT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LOAD": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LOCK": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MAP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MERGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MOD": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NATURAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 13, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NOTNULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, 
"doc": 3}, "sqlglot.tokens.TokenType.NULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OFFSET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ORDER_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ORDERED": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ORDINALITY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OUTER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OVER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OVERLAPS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OVERWRITE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PARTITION": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PARTITION_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PERCENT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PIVOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PRAGMA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PROCEDURE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PROPERTIES": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.QUALIFY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.QUOTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RECURSIVE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.REPLACE": {"qualname": 2, "fullname": 4, "annotation": 0, 
"default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RETURNING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.REFERENCES": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RIGHT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RLIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROLLBACK": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROLLUP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROWS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SELECT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SEMI": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SEPARATOR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SETTINGS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SHOW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SOME": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.STRUCT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TEMPORARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TOP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.THEN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TRUE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNCACHE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNION": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNNEST": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNPIVOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UPDATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.USE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.USING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VALUES": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VIEW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VOLATILE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WHEN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WHERE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WINDOW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WITH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNIQUE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.Token": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.Token.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 133, "bases": 0, "doc": 83}, "sqlglot.tokens.Token.number": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Token.string": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Token.identifier": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Token.var": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Tokenizer": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.Tokenizer.reset": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3}, "sqlglot.tokens.Tokenizer.tokenize": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 16}, "sqlglot.transforms": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.transforms.unalias_group": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 146}, "sqlglot.transforms.eliminate_distinct_on": {"qualname": 3, "fullname": 5, "annotation": 0, 
"default_value": 0, "signature": 40, "bases": 0, "doc": 66}, "sqlglot.transforms.eliminate_qualify": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 111}, "sqlglot.transforms.remove_precision_parameterized_types": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 33}, "sqlglot.transforms.unnest_to_explode": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 16}, "sqlglot.transforms.explode_to_unnest": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 12}, "sqlglot.transforms.remove_target_from_merge": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 11}, "sqlglot.transforms.remove_within_group_for_percentiles": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.transforms.add_recursive_cte_column_names": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.transforms.epoch_cast_to_ts": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.transforms.preprocess": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 94, "bases": 0, "doc": 84}, "sqlglot.trie": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.trie.new_trie": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 200}, "sqlglot.trie.in_trie": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 47, "bases": 0, "doc": 299}}, "length": 1843, "save": true}, "index": {"qualname": {"root": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, 
"sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 52, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.pretty": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}}, "df": 8}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}}, "df": 2}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, 
"sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 22}}}, "y": {"docs": {"sqlglot.expressions.Property": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 3}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.expressions.Pragma": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 11, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, 
"sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 31}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}}, "df": 5, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}}, "df": 2}}, "s": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 
0, "d": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.expressions.ParameterizedAgg": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}}, "df": 6, "t": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PathColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "d": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PERCENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PercentileCont": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.PercentileDisc": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}}, "df": 6}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PIPE": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"sqlglot.planner.Plan": {"tf": 
1}, "sqlglot.planner.Plan.__init__": {"tf": 1}}, "df": 2}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PLUS": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}}, "df": 7, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}}, "df": 5}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Posexplode": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}}, "df": 2}, "w": {"docs": {"sqlglot.expressions.Pow": {"tf": 1}}, "df": 1}}, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}}, "df": 17}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "y": {"docs": {}, 
"df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 5}}}}}}}}, "s": {"docs": {"sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}}, "df": 14, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SchemaError": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SchemaCommentProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}}, "df": 5}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 36, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 7}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"2": {"docs": {"sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}}, "df": 7}, "docs": {"sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}}, "df": 5, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}}, "df": 4}}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SPACE": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Split": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}}}}, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": 
{"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, 
"sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": 
{"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, 
"sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}}, "df": 279, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}}, "df": 10}}}, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SqlSecurityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Sqrt": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select": 
{"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}}, "df": 25, "s": {"docs": {"sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}}, "df": 12, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.expressions.SetTag": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SettingsProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SetProperty": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.SetAgg": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "n": {"docs": {"sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}}}}}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.SerdeProperties": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "p": {"docs": {"sqlglot.generator.Generator.sep": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {"sqlglot.generator.Generator.seg": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.name_sequence": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.case_sensitive": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.SEMI": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}}, "df": 8, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.expressions.SortArray": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SOME": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}}, "df": 3}, "b": {"docs": {"sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}}, "df": 4}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 10, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SubqueryPredicate": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 4}}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}}, "df": 2}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}}, "df": 8, "t": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.startswith": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}}, "df": 4}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StarMap": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor.python.PythonExecutor.static": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.StabilityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.StandardHash": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 3, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.StructExtract": {"tf": 1}}, "df": 1}}}}}}}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}}, "df": 3}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Literal.string": 
{"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}}, "df": 9}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToDate": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToTime": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.StrToUnix": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.Stddev": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevPop": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevSamp": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SafeDivide": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.same_parent": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 10}}}}}}}, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": 
{"docs": {"sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}}, "df": 3}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.should_identify": {"tf": 1}}, "df": 1}}}}, "a": {"2": {"docs": {"sqlglot.expressions.SHA2": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.expressions.SHA": {"tf": 1}}, "df": 1}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}}, "df": 2}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}}, "df": 2}}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}}, "df": 2}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}}, "df": 5}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.SLASH": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.parse_one": {"tf": 1}}, "df": 1}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": 
{"docs": {"sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.OnConflict": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.OnCommitProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}}, "df": 9, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}}, "df": 4, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}}, "df": 4}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}}, "df": 8}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Overlaps": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}}, "df": 3}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": 
{"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 13, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.OptimizeError": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.OpenJSON": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.OpenJSONColumnDef": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}}, "df": 1, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}}, "df": 7}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, 
"sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}}, "df": 8}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OUTER": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}}, "df": 3}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}}, "df": 5}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.TransientProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}}, "df": 4}}, "m": {"docs": {"sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}}, "df": 3}, "e": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 2}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.expressions.true": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 30, "a": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}}, "df": 7}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}}, "df": 7, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}}, "df": 4}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}}, "df": 2}}}}}}}, "g": {"docs": {"sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 22, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 2, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 21}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.TokenError": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, 
"sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": 
{"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, 
"sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, 
"sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 288}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.ToChar": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.ToBase64": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}, "p": {"docs": {"sqlglot.tokens.TokenType.TOP": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 2, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": 
{"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}}, "df": 8}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}, "t": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TsOrDsAdd": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TsOrDsToDate": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.expressions.TsOrDiToDi": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}}, "df": 10}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}}, "df": 5}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, 
"r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.TemporaryProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}}, "df": 9, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}}, "df": 2}}}}, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimestampAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimestampSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimestampDiff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TimeStrToDate": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeStrToUnix": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimeSub": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimeAdd": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 
0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.TimeTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToTimeStr": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeToUnix": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TitleColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.TILDA": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}}, "df": 5}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.this": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.THEN": {"tf": 1}}, "df": 1}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 82, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}}, "df": 3}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.CROSS": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}}, "df": 6}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}}, "df": 3}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 6, "i": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.CountIf": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 53, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}}, "df": 3}}}, "s": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 10}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.ColumnPosition": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}}, "df": 2}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ColumnConstraint": {"tf": 1}, 
"sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}}, "df": 2, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.ColumnConstraintKind": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CollateColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CollateProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLON": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}}, "df": 4}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.expressions.Comment": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.expressions.Expression.add_comments": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CommentColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "a": {"docs": {"sqlglot.tokens.TokenType.COMMA": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Command": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}}, "df": 3}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, 
"c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CompressColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}}, "df": 4}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}}, "df": 4, "w": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ConcatWs": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}}, "df": 12}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 15}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.convert": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot.generator.cached_generator": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 10, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CastToStrType": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}}, "df": 7, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, 
"l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CARET": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}}, "df": 9}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Clone": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}}, "df": 2}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}}, "df": 3}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}}, "df": 4}}, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Select.ctas": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}}, "df": 6, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDatetime": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, 
"df": 0, "e": {"docs": {"sqlglot.expressions.CurrentTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.CurrentTimestamp": {"tf": 1}}, "df": 1}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.CurrentUser": {"tf": 1}}, "df": 1}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CUBE": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 3}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CharacterSetProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Check": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CheckColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ChecksumProperty": {"tf": 1}, 
"sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.replace_children": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Ceil": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 33, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 5}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}}, "df": 3}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 7}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}}, "df": 5}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DATABASE": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}}, "df": 83, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataTypeSize": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 
1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}}, "df": 11, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}}, "df": 2}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DateSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}}, "df": 3}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.DateFromParts": {"tf": 1}}, "df": 1}}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DatetimeAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DatetimeSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.DatetimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DatetimeTrunc": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DateTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, 
"df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DateToDateStr": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot.expressions.DateToDi": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DateAdd": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {"sqlglot.expressions.Day": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.DayOfWeek": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DayOfMonth": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DayOfYear": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DASH": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.DARROW": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.DAMP": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 7}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": 
{}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistStyleProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 3}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}}, "df": 11, "s": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}}, "df": 22}}}}}}, "v": {"docs": {"sqlglot.expressions.Div": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}}, "df": 2}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 3, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DictProperty": {"tf": 1}, 
"sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DictSubProperty": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DictRange": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DiToDate": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}}, "df": 1}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}}, "df": 6}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}}, "df": 4, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}}, "df": 4}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, 
"sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.planner.Step.add_dependency": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DerivedTable": {"tf": 1}}, "df": 1}}}}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DefinerProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Decode": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, 
"sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}}, "df": 6}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}}, "df": 1}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.dfs": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}}, "df": 5}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}}, "df": 3}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.DCOLON": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, 
"sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 35, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Initcap": {"tf": 1}}, "df": 1}}}}}, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}, "8": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}, "docs": {"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}}, "df": 
1}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}}, "df": 9}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}}, "df": 5}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}}, "df": 3}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 
1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.InputOutputFormat": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}}, "df": 2}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INNER": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}}, "df": 25, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": 
{"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}}, "df": 7, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}}, "df": 2}}}}}}}, "f": {"docs": {"sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}}, "df": 7, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.IfNull": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.is_iterable": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}}, "df": 2}}}}, "y": {"docs": {"sqlglot.generator.Generator.no_identify": {"tf": 1}, 
"sqlglot.helper.should_identify": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}}, "df": 9, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {"sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}}, "df": 6}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ALTER": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}}, "df": 2}}}}}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AlgorithmProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.AggFunc": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, 
"o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}}, "df": 3}}}}}}}, "y": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}}, "df": 6, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AnyValue": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.find_ancestor": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.ANTI": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.expressions.Condition.as_": {"tf": 1}}, "df": 1, "c": {"docs": {"sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}}, "df": 4}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.tokens.TokenType.ASOF": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.ArrayAll": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ArrayAny": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ArrayOverlaps": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArrayConcat": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ArrayContains": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.ArrayContained": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.ArrayFilter": {"tf": 1}}, "df": 1}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.ArrayJoin": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ArraySize": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArraySort": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.ArraySum": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ArrayUnionAgg": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 6}}}, "g": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 2}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ApproxDistinct": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ApproxQuantile": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, 
"sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 11, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AutoIncrementProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.AutoName": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Abs": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 
1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}}, "df": 5}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.AMP": {"tf": 1}}, "df": 1}}}, "w": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}}, "df": 7}}, "n": {"docs": {"sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}}, "df": 13, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 1, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}}, "df": 2}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.WithDataProperty": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, 
"r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.WithJournalTableProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}}, "df": 9, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}}, "df": 10}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.text_width": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ErrorLevel.WARN": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Week": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.WeekOfYear": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.wrap": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}}, "df": 6}}}, "l": {"docs": {"sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FileFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}}, "df": 4}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 7}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Final": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}}, "df": 10, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FULL": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 4, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}}, "df": 6}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 4}}}}}, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, 
"sqlglot.tokens.TokenType.FLOAT": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Floor": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 13, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.FromBase64": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.false": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}}, "df": 3}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.FARROW": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 6, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": 
{"sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}}, "df": 10}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": 
{"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, 
"sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, 
"sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, 
"sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 
1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}}, "df": 310}}, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 5, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}}, "df": 2}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, 
"sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 8}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.expressions.GT": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}}, "df": 3}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}}, "df": 15, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 4}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}}, "df": 4, "b": {"docs": {"sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.JSONBContains": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONBExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONBExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.JSONKeyValue": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}}, "df": 2}}}}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONObject": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONFormat": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}}, "df": 12, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}}, "df": 4}}}}, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {"sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UnixToTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToTimeStr": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 2}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 12}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.UnsupportedError": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}}, "df": 3}}}}}, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Unhex": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Upper": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Use": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}}, "df": 3, "r": {"docs": {"sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}}, "df": 2}}}, "m": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}}, "df": 2}}}}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}}, "df": 2}}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {"sqlglot.expressions.Exp": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, 
"sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 46, "s": {"docs": {"sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 4}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.expand": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}}, "df": 7}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ExternalProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "r": {"docs": {"sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ExecuteAsProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}}, "df": 3}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 9}}}}, "d": {"docs": {"sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.endswith": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Encode": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.EngineProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 5}}}}}, "s": {"docs": {"sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 2}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, 
"df": 3}}}, "q": {"docs": {"sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 5}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 6}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.schema.Schema.empty": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}}, "df": 9}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Repeat": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 8}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.expressions.Reduce": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 3}}}}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Return": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Returning": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}}, "df": 4}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 9}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}}, "df": 2}}}}}, "f": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, 
"df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}}, "df": 5}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.ReadCSV": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.RegexpExtract": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpLike": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpILike": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.RegexpSplit": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.ROWS": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}}, "df": 2}}}}}}, "f": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.RowNumber": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}}, "df": 4}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}}, "df": 4}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Round": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}}, "df": 3, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.expressions.RangeN": {"tf": 1}}, "df": 
2}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}, "w": {"docs": {"sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.RawString": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}}, "df": 2}}}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.expressions.Right": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}}, "df": 9}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}}, "df": 1}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}}, "df": 7, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}}, "df": 2}}, "n": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}}, "df": 1, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}}, "df": 2, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.LastDateOfMonth": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}}, "df": 4}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LanguageProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "w": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}}, "df": 9, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LocationProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "k": {"docs": {"sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}}, "df": 
4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LockingProperty": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}}, "df": 2}}}}}}, "g": {"1": {"0": {"docs": {"sqlglot.expressions.Log10": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"docs": {"sqlglot.expressions.Log2": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.expressions.Log": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LogProperty": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.LogicalOr": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.LogicalAnd": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Lower": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.expressions.Left": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}}, "df": 3}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Length": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Levenshtein": {"tf": 1}}, "df": 1}}}}}}}}}}, "t": {"docs": {"sqlglot.expressions.LT": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.expressions.LTE": {"tf": 1}, 
"sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}}, "df": 3}}, "n": {"docs": {"sqlglot.expressions.Ln": {"tf": 1}}, "df": 1}, "r": {"docs": {"sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}}, "df": 9}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}}, "df": 6}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}}, "df": 2}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Hll": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.merge_errors": {"tf": 1}, 
"sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 8, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.MergeTreeTTL": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.MergeTreeTTLAction": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}}, "df": 3}, "p": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}}, "df": 5, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 7}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": 
{"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.MatchAgainst": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.MaterializedProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "y": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}}, "df": 5}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}}, "df": 2}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Month": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}}, "df": 6}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Mul": {"tf": 1}, 
"sqlglot.generator.Generator.mul_sql": {"tf": 1}}, "df": 2}}, "d": {"5": {"docs": {"sqlglot.expressions.MD5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}}, "df": 7}}}}, "g": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}}, "df": 13}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}}, "df": 3, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "o": 
{"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "x": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}}, "df": 6}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BEGIN": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.bfs": {"tf": 1}}, "df": 1}}, "y": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}}, "df": 8, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Boolean": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}}, "df": 6}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}}, "df": 4}}}, "e": {"docs": {"sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BREAK": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}}, "df": 14, "s": {"docs": {"sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.National": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATURAL": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, 
"sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.NumberToStr": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}}, "df": 10, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}}, "df": 
5, "d": {"docs": {"sqlglot.optimizer.normalize.normalized": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {"sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}}, "df": 6, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 4}}}}, "q": {"docs": {"sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}}, "df": 4}, "g": {"docs": {"sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}}, "df": 2}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}}, "df": 2, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.NextValueFor": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "c": 
{"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}}, "df": 2}}}}}}, "l": {"2": {"docs": {"sqlglot.expressions.Nvl2": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}}, "df": 2}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Variance": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VariancePop": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VarMap": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.VolatileProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.VIEW": {"tf": 1}}, "df": 1}}}}, "x": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.expressions.XMLTable": {"tf": 1}}, "df": 2}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}}, "df": 3}}, "y": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 4}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}}, "df": 2}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 9}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Quantile": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.query_modifiers": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}}, "df": 2}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Year": {"tf": 1}}, "df": 1}}}}}}, "fullname": {"root": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, 
"sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 52, "s": {"docs": {"sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 1, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, 
"sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, 
"sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, 
"sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, 
"sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}}, "df": 384, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.pretty": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, 
"sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.dialect.Dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.drill": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.trino": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, 
"sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.errors": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env": {"tf": 1}, "sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.executor.python": {"tf": 1}, "sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": 
{"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Predicate": {"tf": 1}, 
"sqlglot.expressions.DerivedTable": {"tf": 1}, "sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Clone": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.expressions.Pragma": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.expressions.RawString": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.ColumnPosition": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.expressions.SetTag": {"tf": 1}, "sqlglot.expressions.Comment": {"tf": 1}, "sqlglot.expressions.MergeTreeTTLAction": {"tf": 1}, "sqlglot.expressions.MergeTreeTTL": {"tf": 1}, "sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1}, "sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.expressions.CompressColumnConstraint": {"tf": 1}, "sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.expressions.TitleColumnConstraint": {"tf": 1}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1}, "sqlglot.expressions.PathColumnConstraint": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.expressions.Check": {"tf": 1}, "sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, 
"sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.OnConflict": {"tf": 1}, "sqlglot.expressions.Returning": {"tf": 1}, "sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.expressions.National": {"tf": 1}, "sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.expressions.Final": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.expressions.Property": {"tf": 1}, "sqlglot.expressions.AlgorithmProperty": {"tf": 1}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1}, "sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.expressions.CollateProperty": {"tf": 1}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.expressions.DefinerProperty": {"tf": 1}, "sqlglot.expressions.DistKeyProperty": {"tf": 1}, "sqlglot.expressions.DistStyleProperty": {"tf": 1}, "sqlglot.expressions.EngineProperty": {"tf": 1}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1}, "sqlglot.expressions.ExternalProperty": {"tf": 1}, "sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.expressions.FileFormatProperty": {"tf": 1}, "sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.expressions.InputOutputFormat": {"tf": 1}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.expressions.LanguageProperty": {"tf": 1}, "sqlglot.expressions.DictProperty": {"tf": 1}, "sqlglot.expressions.DictSubProperty": {"tf": 1}, "sqlglot.expressions.DictRange": {"tf": 1}, "sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.expressions.LocationProperty": {"tf": 1}, "sqlglot.expressions.LockingProperty": {"tf": 1}, "sqlglot.expressions.LogProperty": {"tf": 1}, "sqlglot.expressions.MaterializedProperty": {"tf": 1}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1}, "sqlglot.expressions.OnCommitProperty": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}, "sqlglot.expressions.RowFormatProperty": {"tf": 1}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1}, "sqlglot.expressions.SerdeProperties": {"tf": 1}, "sqlglot.expressions.SetProperty": {"tf": 1}, "sqlglot.expressions.SettingsProperty": {"tf": 1}, 
"sqlglot.expressions.SortKeyProperty": {"tf": 1}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1}, "sqlglot.expressions.StabilityProperty": {"tf": 1}, "sqlglot.expressions.TemporaryProperty": {"tf": 1}, "sqlglot.expressions.TransientProperty": {"tf": 1}, "sqlglot.expressions.VolatileProperty": {"tf": 1}, "sqlglot.expressions.WithDataProperty": {"tf": 1}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Return": {"tf": 1}, "sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 
1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataTypeSize": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.expressions.SubqueryPredicate": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.expressions.Command": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.expressions.Div": {"tf": 1}, "sqlglot.expressions.Overlaps": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.expressions.ArrayOverlaps": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}, "sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.Neg": 
{"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.AggFunc": {"tf": 1}, "sqlglot.expressions.ParameterizedAgg": {"tf": 1}, "sqlglot.expressions.Abs": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.expressions.Hll": {"tf": 1}, "sqlglot.expressions.ApproxDistinct": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.expressions.ToChar": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}, "sqlglot.expressions.ArrayAll": {"tf": 1}, "sqlglot.expressions.ArrayAny": {"tf": 1}, "sqlglot.expressions.ArrayConcat": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayContained": {"tf": 1}, "sqlglot.expressions.ArrayFilter": {"tf": 1}, "sqlglot.expressions.ArrayJoin": {"tf": 1}, "sqlglot.expressions.ArraySize": {"tf": 1}, "sqlglot.expressions.ArraySort": {"tf": 1}, "sqlglot.expressions.ArraySum": {"tf": 1}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}, "sqlglot.expressions.AnyValue": {"tf": 1}, "sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.CastToStrType": {"tf": 1}, "sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.expressions.Ceil": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.expressions.ConcatWs": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.expressions.CountIf": {"tf": 1}, "sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.expressions.CurrentDatetime": {"tf": 1}, "sqlglot.expressions.CurrentTime": {"tf": 1}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1}, "sqlglot.expressions.CurrentUser": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DateTrunc": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.DayOfWeek": {"tf": 1}, "sqlglot.expressions.DayOfMonth": {"tf": 1}, "sqlglot.expressions.DayOfYear": {"tf": 1}, "sqlglot.expressions.WeekOfYear": {"tf": 1}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, 
"sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.DateFromParts": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}, "sqlglot.expressions.DateToDateStr": {"tf": 1}, "sqlglot.expressions.DateToDi": {"tf": 1}, "sqlglot.expressions.Day": {"tf": 1}, "sqlglot.expressions.Decode": {"tf": 1}, "sqlglot.expressions.DiToDate": {"tf": 1}, "sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.expressions.Exp": {"tf": 1}, "sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.expressions.Floor": {"tf": 1}, "sqlglot.expressions.FromBase64": {"tf": 1}, "sqlglot.expressions.ToBase64": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}, "sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.expressions.IfNull": {"tf": 1}, "sqlglot.expressions.Initcap": {"tf": 1}, "sqlglot.expressions.JSONKeyValue": {"tf": 1}, "sqlglot.expressions.JSONObject": {"tf": 1}, "sqlglot.expressions.OpenJSONColumnDef": {"tf": 1}, "sqlglot.expressions.OpenJSON": {"tf": 1}, "sqlglot.expressions.JSONBContains": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.JSONExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONBExtract": {"tf": 1}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONFormat": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}, "sqlglot.expressions.Left": {"tf": 1}, "sqlglot.expressions.Right": {"tf": 1}, "sqlglot.expressions.Length": {"tf": 1}, "sqlglot.expressions.Levenshtein": {"tf": 1}, "sqlglot.expressions.Ln": {"tf": 1}, "sqlglot.expressions.Log": {"tf": 1}, "sqlglot.expressions.Log2": {"tf": 1}, "sqlglot.expressions.Log10": {"tf": 1}, "sqlglot.expressions.LogicalOr": {"tf": 1}, "sqlglot.expressions.LogicalAnd": {"tf": 1}, "sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.expressions.StarMap": {"tf": 1}, "sqlglot.expressions.VarMap": {"tf": 1}, "sqlglot.expressions.MatchAgainst": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}, "sqlglot.expressions.MD5": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}, "sqlglot.expressions.Month": {"tf": 1}, "sqlglot.expressions.Nvl2": {"tf": 1}, "sqlglot.expressions.Posexplode": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}, "sqlglot.expressions.PercentileCont": {"tf": 1}, "sqlglot.expressions.PercentileDisc": {"tf": 1}, "sqlglot.expressions.Quantile": {"tf": 1}, "sqlglot.expressions.ApproxQuantile": {"tf": 1}, "sqlglot.expressions.RangeN": {"tf": 1}, "sqlglot.expressions.ReadCSV": {"tf": 1}, "sqlglot.expressions.Reduce": {"tf": 1}, "sqlglot.expressions.RegexpExtract": {"tf": 1}, "sqlglot.expressions.RegexpLike": {"tf": 1}, "sqlglot.expressions.RegexpILike": {"tf": 1}, "sqlglot.expressions.RegexpSplit": {"tf": 1}, "sqlglot.expressions.Repeat": {"tf": 1}, "sqlglot.expressions.Round": {"tf": 1}, "sqlglot.expressions.RowNumber": {"tf": 1}, "sqlglot.expressions.SafeDivide": {"tf": 1}, "sqlglot.expressions.SetAgg": {"tf": 1}, "sqlglot.expressions.SHA": {"tf": 1}, "sqlglot.expressions.SHA2": {"tf": 1}, "sqlglot.expressions.SortArray": {"tf": 1}, "sqlglot.expressions.Split": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}, "sqlglot.expressions.StandardHash": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}, "sqlglot.expressions.StrToDate": {"tf": 1}, "sqlglot.expressions.StrToTime": {"tf": 1}, "sqlglot.expressions.StrToUnix": {"tf": 1}, 
"sqlglot.expressions.NumberToStr": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.expressions.StructExtract": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}, "sqlglot.expressions.Sqrt": {"tf": 1}, "sqlglot.expressions.Stddev": {"tf": 1}, "sqlglot.expressions.StddevPop": {"tf": 1}, "sqlglot.expressions.StddevSamp": {"tf": 1}, "sqlglot.expressions.TimeToStr": {"tf": 1}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1}, "sqlglot.expressions.TimeToUnix": {"tf": 1}, "sqlglot.expressions.TimeStrToDate": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1}, "sqlglot.expressions.Unhex": {"tf": 1}, "sqlglot.expressions.UnixToStr": {"tf": 1}, "sqlglot.expressions.UnixToTime": {"tf": 1}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1}, "sqlglot.expressions.Upper": {"tf": 1}, "sqlglot.expressions.Variance": {"tf": 1}, "sqlglot.expressions.VariancePop": {"tf": 1}, "sqlglot.expressions.Week": {"tf": 1}, "sqlglot.expressions.XMLTable": {"tf": 1}, "sqlglot.expressions.Year": {"tf": 1}, "sqlglot.expressions.Use": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.expressions.NextValueFor": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, 
"sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, 
"sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, 
"sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": 
{"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage": {"tf": 1}, "sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer": {"tf": 1}, "sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, 
"sqlglot.optimizer.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.optimizer": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": 
{"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, 
"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}, "sqlglot.parser": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner": {"tf": 1}, "sqlglot.planner.Plan": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.serde": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, 
"sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, 
"sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, 
"sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, 
"sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 1843, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.sqlite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1.4142135623730951}}, "df": 11}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SqlSecurityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Sqrt": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.empty": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}}, "df": 28, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SchemaError": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SchemaCommentProperty": 
{"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}}, "df": 5}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 
1.4142135623730951}}, "df": 44, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 7}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"2": {"docs": {"sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1.4142135623730951}}, "df": 8}, "docs": {"sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.spark": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1.4142135623730951}}, "df": 6, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}}, "df": 4}}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SPACE": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Split": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, 
"sqlglot.tokens.TokenType.SELECT": {"tf": 1}}, "df": 25, "s": {"docs": {"sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}}, "df": 12, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.expressions.SetTag": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SettingsProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SetProperty": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.SetAgg": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}}}}}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.serde": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 4, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.SerdeProperties": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": 
{"sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "p": {"docs": {"sqlglot.generator.Generator.sep": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {"sqlglot.generator.Generator.seg": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.name_sequence": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.case_sensitive": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.SEMI": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}}, "df": 8, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortArray": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SOME": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe.sql.GroupedData.sum": {"tf": 
1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}}, "df": 3}, "b": {"docs": {"sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}}, "df": 4}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 10, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SubqueryPredicate": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 11}}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}}, "df": 2}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, 
"sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}}, "df": 8, "t": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.startswith": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.starrocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}}, "df": 5}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StarMap": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor.python.PythonExecutor.static": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.StabilityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.StandardHash": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 3, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.StructExtract": {"tf": 1}}, "df": 1}}}}}}}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}}, "df": 3}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 
1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}}, "df": 9}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToDate": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToTime": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.StrToUnix": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.Stddev": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevPop": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevSamp": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SafeDivide": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.same_parent": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.4142135623730951}}, "df": 11}}}}}}}, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.mysql.MySQL.Generator.show_sql": 
{"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}}, "df": 3}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.should_identify": {"tf": 1}}, "df": 1}}}}, "a": {"2": {"docs": {"sqlglot.expressions.SHA2": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.expressions.SHA": {"tf": 1}}, "df": 1}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}}, "df": 2}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}}, "df": 2}}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}}, "df": 2}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 21}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": 
{"sqlglot.tokens.TokenType.SLASH": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.pretty": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.presto": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1.4142135623730951}}, "df": 9}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 8}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, 
"sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 22}}}, "y": {"docs": {"sqlglot.expressions.Property": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 3}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.expressions.Pragma": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 11, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, 
"sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.parser": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.reset": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.check_errors": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}}, "df": 34}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}}, "df": 5, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}}, "df": 2}}, "s": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.expressions.ParameterizedAgg": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}}, "df": 6, "t": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PathColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "d": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PERCENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PercentileCont": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.PercentileDisc": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}}, "df": 6}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PIPE": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": 
{"sqlglot.planner.Plan": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}}, "df": 2, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.planner": {"tf": 1}, "sqlglot.planner.Plan": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 16}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PLUS": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}}, "df": 7, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}}, "df": 6}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Posexplode": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}}, "df": 2}, "w": {"docs": {"sqlglot.expressions.Pow": {"tf": 1}}, "df": 1}}, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python": {"tf": 1}, "sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, 
"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}}, "df": 21, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}}, "df": 17}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}}, "df": 10}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.parse_one": {"tf": 1}}, "df": 1}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, 
"df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.OnConflict": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.OnCommitProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}}, "df": 9, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}}, "df": 4, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}}, "df": 4}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1.4142135623730951}}, "df": 9}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Overlaps": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}}, "df": 3}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 13, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.OptimizeError": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.optimizer": {"tf": 1}, "sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.optimizer.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, 
"sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.optimizer": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 131}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.OpenJSON": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": 
{"sqlglot.expressions.OpenJSONColumnDef": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}}, "df": 1, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}}, "df": 7}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}}, "df": 8}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OUTER": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}}, "df": 3}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}}, "df": 5}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.transforms": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}, 
"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 12}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.TransientProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.trino": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1.4142135623730951}}, "df": 5}}, "m": {"docs": {"sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}}, "df": 3}, "e": {"docs": {"sqlglot.trie": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 3}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.expressions.true": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.add_columns": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.append": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.pop": {"tf": 1.4142135623730951}, "sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, 
"sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 40, "a": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}}, "df": 8}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}}, "df": 4}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}}, "df": 2}}}}}}}, "g": {"docs": {"sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, 
"sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 22, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 2, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 21}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.TokenError": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.tokens": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, 
"sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 
1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, 
"sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, 
"sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 298}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, 
"sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, 
"sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, 
"sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, 
"sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 288}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.ToChar": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.ToBase64": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}, "p": {"docs": {"sqlglot.tokens.TokenType.TOP": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 2, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.tsql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1.4142135623730951}}, "df": 10}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": 
{"docs": {"sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}, "t": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TsOrDsAdd": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TsOrDsToDate": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.expressions.TsOrDiToDi": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1.4142135623730951}}, "df": 11}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}}, "df": 5}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.TemporaryProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "i": {"docs": {}, 
"df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}}, "df": 10, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}}, "df": 2}}}}, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimestampAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimestampSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimestampDiff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TimeStrToDate": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeStrToUnix": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimeSub": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimeAdd": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.TimeTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, 
"df": 0, "r": {"docs": {"sqlglot.expressions.TimeToTimeStr": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeToUnix": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TitleColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.TILDA": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}}, "df": 5}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.this": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.THEN": {"tf": 1}}, "df": 1}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 82, "s": {"docs": {"sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 6}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}}, "df": 3}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, 
"sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, 
"df": 111, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 5}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}}, "df": 3}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 7}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}}, "df": 6}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DATABASE": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}}, "df": 83, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataTypeSize": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}}, "df": 11, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}}, "df": 2}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DateSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}}, "df": 3}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.DateFromParts": {"tf": 1}}, "df": 1}}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}}, "df": 
3, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DatetimeAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DatetimeSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.DatetimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DatetimeTrunc": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DateTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DateToDateStr": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot.expressions.DateToDi": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DateAdd": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {"sqlglot.expressions.Day": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.DayOfWeek": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DayOfMonth": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DayOfYear": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DASH": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.DARROW": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.DAMP": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 7}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistStyleProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 3}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.dialect.Dialect": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}}, "df": 70, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, 
"sqlglot.dialects.databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.drill": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, 
"sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.trino": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}}, "df": 220}}}}}}, "v": {"docs": {"sqlglot.expressions.Div": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1.4142135623730951}}, "df": 15}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}}, "df": 2}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 3, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DictProperty": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DictSubProperty": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DictRange": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}}, "df": 
1}}}}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DiToDate": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}}, "df": 1}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.drill": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1.4142135623730951}}, "df": 7}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}}, "df": 4, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}}, "df": 4}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.planner.Step.add_dependency": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DerivedTable": {"tf": 1}}, "df": 1}}}}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DefinerProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Decode": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.duckdb": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1.4142135623730951}}, "df": 7}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}}, "df": 1}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.dfs": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Column.to_dot": {"tf": 1}, 
"sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}}, "df": 5}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}}, "df": 3}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.DCOLON": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.CROSS": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}}, "df": 6}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}}, "df": 3}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 6, "i": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.CountIf": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 53, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}}, "df": 3}}}, "s": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 
1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 15}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.ColumnPosition": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}}, "df": 2}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}}, "df": 2, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.ColumnConstraintKind": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CollateColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CollateProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLON": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}}, "df": 4}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.expressions.Comment": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, 
"sqlglot.tokens.TokenType.COMMENT": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.expressions.Expression.add_comments": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CommentColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "a": {"docs": {"sqlglot.tokens.TokenType.COMMA": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Command": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}}, "df": 3}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CompressColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}}, "df": 4}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}}, "df": 4, "w": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ConcatWs": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.eval": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.add_columns": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.filter": {"tf": 1.4142135623730951}, 
"sqlglot.executor.context.Context.sort": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.set_row": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.set_index": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.set_range": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}}, "df": 13}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 15}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.convert": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot.generator.cached_generator": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 10, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CastToStrType": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": 
{"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}}, "df": 7, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}}, "df": 6}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CARET": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}}, "df": 10}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Clone": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}}, "df": 2}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": 
{"tf": 1}}, "df": 3}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}}, "df": 5}}, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Select.ctas": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}}, "df": 6, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDatetime": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.CurrentTimestamp": {"tf": 1}}, "df": 1}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.CurrentUser": {"tf": 1}}, "df": 1}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CUBE": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 3}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CharacterSetProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Check": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CheckColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.replace_children": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Ceil": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 35, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Initcap": {"tf": 1}}, "df": 1}}}}}, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}, "8": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}, "docs": {"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": 
{"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}}, "df": 9}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}}, "df": 5}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}}, "df": 3}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, 
"u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.InputOutputFormat": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}}, "df": 2}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INNER": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}}, "df": 25, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, 
"n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.4142135623730951}}, "df": 2, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}}, "df": 7, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}}, "df": 2}}}}}}}, "f": {"docs": {"sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}}, "df": 7, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.IfNull": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.helper.is_iterable": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.optimizer.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}}, "df": 3}}}}, "y": {"docs": {"sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}}, "df": 9, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {"sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}}, "df": 6}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ALTER": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}}, "df": 2}}}}}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AlgorithmProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.always_true": {"tf": 1}, 
"sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.AggFunc": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}}, "df": 3}}}}}}}, "y": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}}, "df": 6, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AnyValue": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.find_ancestor": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1.4142135623730951}}, "df": 5}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.ANTI": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.expressions.Condition.as_": {"tf": 1}}, "df": 1, "c": {"docs": {"sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}}, "df": 4}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.tokens.TokenType.ASOF": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.ArrayAll": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ArrayAny": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ArrayOverlaps": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArrayConcat": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ArrayContains": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.ArrayContained": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.ArrayFilter": {"tf": 1}}, "df": 1}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.ArrayJoin": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ArraySize": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArraySort": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.ArraySum": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ArrayUnionAgg": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 6}}}, "g": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 2}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 
1}}, "df": 1, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ApproxDistinct": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ApproxQuantile": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 11, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AutoIncrementProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.AutoName": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "z": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Abs": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}}, "df": 5}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.AMP": {"tf": 1}}, "df": 1}}}, "w": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}}, "df": 7}}, "n": {"docs": {"sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}}, "df": 13, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, 
"df": 0, "n": {"docs": {"sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 1, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}}, "df": 2}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.WithDataProperty": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.WithJournalTableProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}}, "df": 9, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}}, "df": 10}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.text_width": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ErrorLevel.WARN": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Week": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.WeekOfYear": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": 
{"sqlglot.generator.Generator.wrap": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}}, "df": 6}}}, "l": {"docs": {"sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FileFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}}, "df": 4}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 7}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Final": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}}, "df": 10, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FULL": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 4, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": 
{"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}}, "df": 6}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 4}}}}}, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Floor": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 13, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.FromBase64": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FallbackProperty": {"tf": 1}, 
"sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.false": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}}, "df": 3}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.FARROW": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 6, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}}, "df": 10}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": 
{"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.unsupported": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sep": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.seg": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pad_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.wrap": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.no_identify": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.normalize_func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cache_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.column_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.create_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.clone_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.describe_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cte_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.directory_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.delete_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.drop_sql": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.except_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.except_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.filter_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.hint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.index_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.national_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.partition_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.root_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.property_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.insert_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.intersect_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.returning_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.table_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.update_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.var_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.into_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.from_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.group_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.having_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.join_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lambda_sql": {"tf": 
1.4142135623730951}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.limit_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.offset_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.set_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lock_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.literal_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.null_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.order_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sort_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.schema_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.star_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.union_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.union_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.where_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.window_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.between_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.all_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.any_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.exists_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.case_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.extract_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.trim_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.concat_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.check_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.primarykey_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.if_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.in_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.interval_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.return_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.reference_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.paren_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.neg_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.not_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.alias_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.add_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.and_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.connector_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cast_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.collate_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.command_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.comment_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.commit_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.div_sql": {"tf": 
1.4142135623730951}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.distance_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dot_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.eq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.escape_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.glob_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.gt_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.gte_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.is_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.like_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lt_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lte_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mod_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mul_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.neq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.or_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.slice_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sub_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.use_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.binary": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.format_args": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.text_width": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.format_time": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.naked_property": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.set_operation": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tag_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.token_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.when_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.merge_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.cached_generator": {"tf": 1.4142135623730951}}, "df": 311}}, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, 
"sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 5, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}}, "df": 2}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 8}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.expressions.GT": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}}, "df": 3}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, 
"sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}}, "df": 15, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.optimizer.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 9}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}}, "df": 4, "b": {"docs": {"sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.JSONBContains": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONBExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONBExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.JSONKeyValue": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}}, "df": 2}}}}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONObject": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}}, "df": 
2}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONFormat": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}}, "df": 12, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}}, "df": 4}}}}, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {"sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": 
{"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UnixToTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToTimeStr": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 2}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 14}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.UnsupportedError": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}}, "df": 3}}}}}, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Unhex": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, 
"sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Upper": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Use": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}}, "df": 3, "r": {"docs": {"sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}}, "df": 2}}}}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}}, "df": 2}}}}}}}, "u": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {"sqlglot.expressions.Exp": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, 
"sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 46, "s": {"docs": {"sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.DerivedTable": {"tf": 1}, "sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Clone": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.expressions.Pragma": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.SetItem": 
{"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.expressions.RawString": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.ColumnPosition": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.expressions.SetTag": {"tf": 1}, "sqlglot.expressions.Comment": {"tf": 1}, "sqlglot.expressions.MergeTreeTTLAction": {"tf": 1}, "sqlglot.expressions.MergeTreeTTL": {"tf": 1}, "sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1}, "sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.expressions.CompressColumnConstraint": {"tf": 1}, "sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.expressions.TitleColumnConstraint": {"tf": 1}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1}, "sqlglot.expressions.PathColumnConstraint": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.expressions.Check": {"tf": 1}, "sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.OnConflict": {"tf": 1}, "sqlglot.expressions.Returning": {"tf": 1}, "sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.expressions.National": {"tf": 1}, "sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.expressions.Group": {"tf": 
1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.expressions.Final": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.expressions.Property": {"tf": 1}, "sqlglot.expressions.AlgorithmProperty": {"tf": 1}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1}, "sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.expressions.CollateProperty": {"tf": 1}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.expressions.DefinerProperty": {"tf": 1}, "sqlglot.expressions.DistKeyProperty": {"tf": 1}, "sqlglot.expressions.DistStyleProperty": {"tf": 1}, "sqlglot.expressions.EngineProperty": {"tf": 1}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1}, "sqlglot.expressions.ExternalProperty": {"tf": 1}, "sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.expressions.FileFormatProperty": {"tf": 1}, "sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.expressions.InputOutputFormat": {"tf": 1}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.expressions.LanguageProperty": {"tf": 1}, "sqlglot.expressions.DictProperty": {"tf": 1}, "sqlglot.expressions.DictSubProperty": {"tf": 1}, "sqlglot.expressions.DictRange": {"tf": 1}, "sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.expressions.LocationProperty": {"tf": 1}, "sqlglot.expressions.LockingProperty": {"tf": 1}, "sqlglot.expressions.LogProperty": {"tf": 1}, "sqlglot.expressions.MaterializedProperty": {"tf": 1}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1}, "sqlglot.expressions.OnCommitProperty": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}, "sqlglot.expressions.RowFormatProperty": {"tf": 1}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1}, "sqlglot.expressions.SerdeProperties": {"tf": 1}, "sqlglot.expressions.SetProperty": {"tf": 1}, "sqlglot.expressions.SettingsProperty": {"tf": 1}, "sqlglot.expressions.SortKeyProperty": {"tf": 1}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1}, "sqlglot.expressions.StabilityProperty": {"tf": 1}, "sqlglot.expressions.TemporaryProperty": {"tf": 1}, "sqlglot.expressions.TransientProperty": {"tf": 1}, "sqlglot.expressions.VolatileProperty": {"tf": 1}, "sqlglot.expressions.WithDataProperty": {"tf": 1}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": 
{"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Return": {"tf": 1}, "sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataTypeSize": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 
1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.expressions.SubqueryPredicate": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.expressions.Command": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.expressions.Div": {"tf": 1}, "sqlglot.expressions.Overlaps": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.expressions.ArrayOverlaps": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}, "sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, 
"sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.AggFunc": {"tf": 1}, "sqlglot.expressions.ParameterizedAgg": {"tf": 1}, "sqlglot.expressions.Abs": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.expressions.Hll": {"tf": 1}, "sqlglot.expressions.ApproxDistinct": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.expressions.ToChar": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}, "sqlglot.expressions.ArrayAll": {"tf": 1}, "sqlglot.expressions.ArrayAny": {"tf": 1}, "sqlglot.expressions.ArrayConcat": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayContained": {"tf": 1}, "sqlglot.expressions.ArrayFilter": {"tf": 1}, "sqlglot.expressions.ArrayJoin": {"tf": 1}, "sqlglot.expressions.ArraySize": {"tf": 1}, "sqlglot.expressions.ArraySort": {"tf": 1}, "sqlglot.expressions.ArraySum": {"tf": 1}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}, "sqlglot.expressions.AnyValue": {"tf": 1}, "sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.CastToStrType": {"tf": 1}, "sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.expressions.Ceil": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.expressions.ConcatWs": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.expressions.CountIf": {"tf": 1}, "sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.expressions.CurrentDatetime": {"tf": 1}, "sqlglot.expressions.CurrentTime": {"tf": 1}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1}, "sqlglot.expressions.CurrentUser": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DateTrunc": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.DayOfWeek": {"tf": 1}, "sqlglot.expressions.DayOfMonth": {"tf": 1}, "sqlglot.expressions.DayOfYear": {"tf": 1}, "sqlglot.expressions.WeekOfYear": {"tf": 1}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.DateFromParts": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}, "sqlglot.expressions.DateToDateStr": {"tf": 1}, "sqlglot.expressions.DateToDi": {"tf": 1}, "sqlglot.expressions.Day": {"tf": 1}, "sqlglot.expressions.Decode": {"tf": 1}, "sqlglot.expressions.DiToDate": {"tf": 1}, "sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.expressions.Exp": {"tf": 1}, "sqlglot.expressions.Explode": {"tf": 1}, 
"sqlglot.expressions.Floor": {"tf": 1}, "sqlglot.expressions.FromBase64": {"tf": 1}, "sqlglot.expressions.ToBase64": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}, "sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.expressions.IfNull": {"tf": 1}, "sqlglot.expressions.Initcap": {"tf": 1}, "sqlglot.expressions.JSONKeyValue": {"tf": 1}, "sqlglot.expressions.JSONObject": {"tf": 1}, "sqlglot.expressions.OpenJSONColumnDef": {"tf": 1}, "sqlglot.expressions.OpenJSON": {"tf": 1}, "sqlglot.expressions.JSONBContains": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.JSONExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONBExtract": {"tf": 1}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONFormat": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}, "sqlglot.expressions.Left": {"tf": 1}, "sqlglot.expressions.Right": {"tf": 1}, "sqlglot.expressions.Length": {"tf": 1}, "sqlglot.expressions.Levenshtein": {"tf": 1}, "sqlglot.expressions.Ln": {"tf": 1}, "sqlglot.expressions.Log": {"tf": 1}, "sqlglot.expressions.Log2": {"tf": 1}, "sqlglot.expressions.Log10": {"tf": 1}, "sqlglot.expressions.LogicalOr": {"tf": 1}, "sqlglot.expressions.LogicalAnd": {"tf": 1}, "sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.expressions.StarMap": {"tf": 1}, "sqlglot.expressions.VarMap": {"tf": 1}, "sqlglot.expressions.MatchAgainst": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}, "sqlglot.expressions.MD5": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}, "sqlglot.expressions.Month": {"tf": 1}, "sqlglot.expressions.Nvl2": {"tf": 1}, "sqlglot.expressions.Posexplode": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}, "sqlglot.expressions.PercentileCont": {"tf": 1}, "sqlglot.expressions.PercentileDisc": {"tf": 1}, "sqlglot.expressions.Quantile": {"tf": 1}, "sqlglot.expressions.ApproxQuantile": {"tf": 1}, "sqlglot.expressions.RangeN": {"tf": 1}, "sqlglot.expressions.ReadCSV": {"tf": 1}, "sqlglot.expressions.Reduce": {"tf": 1}, "sqlglot.expressions.RegexpExtract": {"tf": 1}, "sqlglot.expressions.RegexpLike": {"tf": 1}, "sqlglot.expressions.RegexpILike": {"tf": 1}, "sqlglot.expressions.RegexpSplit": {"tf": 1}, "sqlglot.expressions.Repeat": {"tf": 1}, "sqlglot.expressions.Round": {"tf": 1}, "sqlglot.expressions.RowNumber": {"tf": 1}, "sqlglot.expressions.SafeDivide": {"tf": 1}, "sqlglot.expressions.SetAgg": {"tf": 1}, "sqlglot.expressions.SHA": {"tf": 1}, "sqlglot.expressions.SHA2": {"tf": 1}, "sqlglot.expressions.SortArray": {"tf": 1}, "sqlglot.expressions.Split": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}, "sqlglot.expressions.StandardHash": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}, "sqlglot.expressions.StrToDate": {"tf": 1}, "sqlglot.expressions.StrToTime": {"tf": 1}, "sqlglot.expressions.StrToUnix": {"tf": 1}, "sqlglot.expressions.NumberToStr": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.expressions.StructExtract": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}, "sqlglot.expressions.Sqrt": {"tf": 1}, "sqlglot.expressions.Stddev": {"tf": 1}, "sqlglot.expressions.StddevPop": {"tf": 1}, "sqlglot.expressions.StddevSamp": {"tf": 1}, "sqlglot.expressions.TimeToStr": {"tf": 1}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1}, "sqlglot.expressions.TimeToUnix": {"tf": 1}, "sqlglot.expressions.TimeStrToDate": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1}, 
"sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1}, "sqlglot.expressions.Unhex": {"tf": 1}, "sqlglot.expressions.UnixToStr": {"tf": 1}, "sqlglot.expressions.UnixToTime": {"tf": 1}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1}, "sqlglot.expressions.Upper": {"tf": 1}, "sqlglot.expressions.Variance": {"tf": 1}, "sqlglot.expressions.VariancePop": {"tf": 1}, "sqlglot.expressions.Week": {"tf": 1}, "sqlglot.expressions.XMLTable": {"tf": 1}, "sqlglot.expressions.Year": {"tf": 1}, "sqlglot.expressions.Use": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.expressions.NextValueFor": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 648}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.expand": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}}, "df": 7}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": 
{"docs": {"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ExternalProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ExecuteAsProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env": {"tf": 1}, "sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.executor.python": {"tf": 1}, "sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, 
"sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}}, "df": 59}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}}, "df": 3}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 9}}}}, "d": {"docs": {"sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.endswith": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {"sqlglot.executor.env": {"tf": 1}, "sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}}, "df": 10}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Encode": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.EngineProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.errors": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 18}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 5}}}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 3}}}, "q": {"docs": {"sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 5}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 10}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.schema.Schema.empty": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 
1}}, "df": 3, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}}, "df": 9}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Repeat": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}}, "df": 9}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reduce": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 3}}}}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Return": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "p": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Returning": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}}, "df": 4}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 9}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}}, "df": 2}}}}}, "f": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}}, "df": 5}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.ReadCSV": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.RegexpExtract": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": 
{}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpLike": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpILike": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.RegexpSplit": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.ROWS": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, 
"df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.RowNumber": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}}, "df": 4}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}}, "df": 4}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Round": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}}, "df": 3, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.expressions.RangeN": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}, "w": {"docs": {"sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.RawString": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}}, "df": 2}}}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.expressions.Right": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, 
"sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}}, "df": 9}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}}, "df": 1}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}}, "df": 7, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage": {"tf": 1}, "sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 8, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}}, "df": 2, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.LastDateOfMonth": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": 
{}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}}, "df": 4}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LanguageProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "w": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}}, "df": 9, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LocationProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "k": {"docs": {"sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LockingProperty": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}}, "df": 2}}}}}}, "g": {"1": {"0": {"docs": {"sqlglot.expressions.Log10": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"docs": {"sqlglot.expressions.Log2": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.expressions.Log": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, 
"df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LogProperty": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.LogicalOr": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.LogicalAnd": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Lower": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.expressions.Left": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}}, "df": 3}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Length": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Levenshtein": {"tf": 1}}, "df": 1}}}}}}}}}}, "t": {"docs": {"sqlglot.expressions.LT": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}}, "df": 3}}, "n": {"docs": {"sqlglot.expressions.Ln": {"tf": 1}}, "df": 1}, "r": {"docs": {"sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.hive": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1.4142135623730951}}, "df": 10}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}}, "df": 6}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 23}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Hll": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, 
"sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 9, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.MergeTreeTTL": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.MergeTreeTTLAction": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}}, "df": 3}, "p": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}}, "df": 5, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 7}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, 
"df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.MatchAgainst": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.MaterializedProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "y": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}}, "df": 5}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}}, "df": 2}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Month": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.mysql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1.4142135623730951}}, "df": 7}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}}, "df": 2}}, "d": {"5": {"docs": {"sqlglot.expressions.MD5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}}, "df": 7}}}}, "g": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}}, "df": 14}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}}, "df": 3, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.BitwiseAnd": {"tf": 1}, 
"sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "x": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}}, "df": 6}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BEGIN": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.bfs": {"tf": 1}}, "df": 1}}, "y": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}}, "df": 8, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, 
"y": {"docs": {"sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}}, "df": 6}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}}, "df": 4}}}, "e": {"docs": {"sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BREAK": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}}, "df": 14, "s": {"docs": {"sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.National": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}}, "df": 
3}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATURAL": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.NumberToStr": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}}, "df": 10, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, 
"df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.optimizer.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}}, "df": 10, "d": {"docs": {"sqlglot.optimizer.normalize.normalized": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {"sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}}, "df": 6, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 4}}}}, "q": {"docs": {"sqlglot.expressions.Condition.neq": {"tf": 1}, 
"sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}}, "df": 4}, "g": {"docs": {"sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}}, "df": 2}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}}, "df": 2, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.NextValueFor": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}}, "df": 2}}}}}}, "l": {"2": {"docs": {"sqlglot.expressions.Nvl2": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}}, "df": 2}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Variance": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VariancePop": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VarMap": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 
0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.VolatileProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.VIEW": {"tf": 1}}, "df": 1}}}}, "x": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.expressions.XMLTable": {"tf": 1}}, "df": 2}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}}, "df": 3}}, "y": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 4}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}}, "df": 2}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.optimizer.qualify": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 18}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Quantile": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.query_modifiers": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}}, "df": 2}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Year": 
{"tf": 1}}, "df": 1}}}}}}, "annotation": {"root": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.empty": {"tf": 1}}, "df": 23, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}}, "df": 9}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}}, "df": 9}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}}, "df": 2}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.expressions.Expression.parent_select": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}}, "df": 3}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 1}}}}}}}}}}}, "default_value": {"root": {"1": {"docs": {"sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}}, "df": 1}, "2": {"docs": {"sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}}, "df": 1}, "3": {"docs": {"sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}}, "df": 1}, "4": {"docs": {"sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}}, "df": 1}, "5": {"docs": {"sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}}, "df": 1}, "6": {"docs": {"sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.schema": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 
1.4142135623730951}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.COMMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CARET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.BIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CASE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DIV": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DROP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FROM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INNER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MOD": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.ORDERED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SOME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TOP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.THEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHERE": 
{"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.4142135623730951}}, "df": 402, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.4142135623730951}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.FARROW": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.4142135623730951}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.FETCH": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.FILTER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FINAL": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.FIRST": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.FORMAT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.tokens.TokenType.FROM": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FULL": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, 
"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": 
{"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, 
"sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, 
"sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, 
"sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 402, "e": {"docs": {"sqlglot.tokens.TokenType.LTE": {"tf": 1.4142135623730951}}, "df": 1}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}}, "df": 8}}}}}, "k": {"docs": {"sqlglot.tokens.TokenType.LOCK": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.4142135623730951}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.LOAD": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {"sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}}, "df": 1}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.LATERAL": 
{"tf": 1.4142135623730951}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.LEFT": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1.4142135623730951}}, "df": 2}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.LIMIT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.4142135623730951}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 6}}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"2": {"docs": {"sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {"sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.4142135623730951}}, "df": 1}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SPACE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.STAR": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.4142135623730951}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1.4142135623730951}}, "df": 6}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.4142135623730951}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.SEMI": {"tf": 1.4142135623730951}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SET": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.SETTINGS": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.SELECT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.4142135623730951}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.4142135623730951}}, "df": 2}}}, "b": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.SLASH": {"tf": 1.4142135623730951}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": 
{"sqlglot.tokens.TokenType.SHOW": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SOME": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.MAP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.4142135623730951}}, "df": 2, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}}, "df": 1}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.4142135623730951}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MERGE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.4142135623730951}}, "df": 2}}}, "d": {"docs": {"sqlglot.tokens.TokenType.MOD": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.OR": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}}, "df": 1, "e": 
{"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.ORDERED": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.OFFSET": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.tokens.TokenType.ON": {"tf": 1.4142135623730951}}, "df": 1}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OUTER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OVER": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1.4142135623730951}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "g": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": 
{"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, 
"sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, 
"sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, 
"sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, 
"sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 402, "e": {"docs": {"sqlglot.tokens.TokenType.GTE": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.tokens.TokenType.GLOB": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}}, "df": 21}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "v": {"docs": {"sqlglot.tokens.TokenType.DIV": {"tf": 1.4142135623730951}}, "df": 1}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, 
"d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DATABASE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DASH": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.DARROW": {"tf": 1.4142135623730951}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.DAMP": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.DROP": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DELETE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.tokens.TokenType.DESC": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.DOT": {"tf": 1.4142135623730951}}, "df": 1}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.DCOLON": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DPIPE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "x": {"2": {"7": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LTE": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.GT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CARET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT128": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.INT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CASE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DIV": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DROP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FROM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FULL": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INNER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MOD": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SOME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TOP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.THEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.4142135623730951}}, "df": 395}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.XML": {"tf": 1.4142135623730951}}, "df": 2}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "d": {"docs": 
{}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1.4142135623730951}}, "df": 2}}}}, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT": {"tf": 1.4142135623730951}}, "df": 3}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {"sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BREAK": {"tf": 1.4142135623730951}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}}, "df": 2}}}}, "y": {"docs": {"sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BEGIN": {"tf": 1.4142135623730951}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": 
{"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.CROSS": {"tf": 1.4142135623730951}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.4142135623730951}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.COMMA": {"tf": 1.4142135623730951}}, "df": 1, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.COMMAND": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.COMMENT": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.COMMIT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLON": {"tf": 1.4142135623730951}}, "df": 1}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLUMN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.COLLATE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CARET": {"tf": 1.4142135623730951}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CACHE": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CASE": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CUBE": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1.4142135623730951}}, "df": 5}}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.HINT": {"tf": 1.4142135623730951}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.HASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}}, "df": 2}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.HAVING": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}}, "df": 7, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.PRAGMA": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": 
{"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}}, "df": 2}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.PARTITION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PLUS": {"tf": 1.4142135623730951}}, "df": 1}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PIPE": {"tf": 1.4142135623730951}}, "df": 1}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PIVOT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PERCENT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.REPLACE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.RETURNING": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.4142135623730951}}, "df": 1}}}, "w": {"docs": {"sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1.4142135623730951}}, "df": 1}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.RANGE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.ROW": {"tf": 1.4142135623730951}}, "df": 1, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.ROWS": {"tf": 1.4142135623730951}}, "df": 1}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.4142135623730951}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.RIGHT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.RLIKE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}}, "df": 3, "a": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TRUE": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}, "t": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.4142135623730951}}, "df": 2}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}}, "df": 76}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.4142135623730951}}, "df": 2}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 
0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.TILDA": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 
1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, 
"sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, 
"sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 287}}}}}}}, "p": {"docs": {"sqlglot.tokens.TokenType.TOP": {"tf": 1.4142135623730951}}, "df": 
1}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.THEN": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 4}}}}}}}}}, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.EXISTS": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "q": {"docs": {"sqlglot.tokens.TokenType.EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}}, "df": 2}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.END": {"tf": 1.4142135623730951}}, "df": 2}}, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ELSE": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "n": {"docs": {"sqlglot.tokens.TokenType.IN": {"tf": 1.4142135623730951}}, "df": 1, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.4142135623730951}}, "df": 2}}}, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.INT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT128": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.INT256": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.INT256": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}, "8": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}, "docs": {"sqlglot.expressions.DataType.Type.INT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "o": {"docs": {"sqlglot.tokens.TokenType.INTO": {"tf": 1.4142135623730951}}, "df": 1}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.INET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INET": {"tf": 1.4142135623730951}}, "df": 2}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INNER": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.INSERT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.IF": {"tf": 1.4142135623730951}}, "df": 1}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ILIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1.4142135623730951}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.IS": {"tf": 1.4142135623730951}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.ISNULL": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ErrorLevel.WARN": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.4142135623730951}}, "df": 2}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.WINDOW": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.WHEN": {"tf": 1.4142135623730951}}, "df": 1}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.WHERE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1.4142135623730951}}, "df": 1}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATURAL": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.NUMBER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.NULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.4142135623730951}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.4142135623730951}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NOT": {"tf": 1.4142135623730951}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.tokens.TokenType.NEQ": {"tf": 1.4142135623730951}}, "df": 1}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.4142135623730951}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.4142135623730951}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ALTER": {"tf": 1.4142135623730951}}, "df": 1}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.ALL": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.4142135623730951}}, "df": 2}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}}, "df": 4}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.AND": {"tf": 1.4142135623730951}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.ANTI": {"tf": 1.4142135623730951}}, "df": 1}}, "y": {"docs": {"sqlglot.tokens.TokenType.ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1.4142135623730951}}, "df": 3}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.AMP": {"tf": 1.4142135623730951}}, "df": 1}}, "t": {"docs": {"sqlglot.tokens.TokenType.LT_AT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1.4142135623730951}}, "df": 2}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.APPLY": {"tf": 
1.4142135623730951}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.tokens.TokenType.ASC": {"tf": 1.4142135623730951}}, "df": 1}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.tokens.TokenType.ASOF": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.4142135623730951}}, "df": 2}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNNEST": {"tf": 1.4142135623730951}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.UINT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.UINT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.UINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "e": {"docs": {"sqlglot.tokens.TokenType.USE": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {"sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1.4142135623730951}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.USING": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.Type.UUID": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.4142135623730951}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UPDATE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.JSON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.4142135623730951}}, "df": 2, "b": {"docs": {"sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.4142135623730951}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.JOIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1.4142135623730951}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.VAR": {"tf": 1.4142135623730951}}, "df": 1, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.4142135623730951}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.tokens.TokenType.VALUES": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.VIEW": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.4142135623730951}}, "df": 
1}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}}, "df": 2}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.KEEP": {"tf": 1.4142135623730951}}, "df": 1}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.QUOTE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "signature": {"root": {"0": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 6, "x": {"7": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"1": {"1": {"8": {"1": {"3": {"8": {"docs": {}, "df": 0, "e": {"5": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "a": {"docs": {}, "df": 0, "f": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "9": {"docs": {}, "df": 0, "f": {"3": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "e": {"docs": {}, "df": 0, "a": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "docs": {}, "df": 0, "a": {"5": {"6": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"5": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "4": {"9": {"8": {"7": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "f": {"3": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "e": {"1": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "a": {"0": {"docs": {}, "df": 0, "e": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "2": {"9": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "b": {"docs": {}, "df": 0, "d": {"9": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "e": {"2": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "8": {"docs": {}, "df": 0, "a": {"5": {"6": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "docs": {}, "df": 0}}, "1": {"0": {"0": {"docs": {"sqlglot.parser.Parser.__init__": {"tf": 1}}, 
"df": 1}, "docs": {}, "df": 0}, "2": {"8": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "4": {"0": {"6": {"0": {"4": {"7": {"2": {"2": {"4": {"9": {"9": {"3": {"4": {"4": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"5": {"6": {"9": {"9": {"2": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"1": {"6": {"5": {"2": {"8": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"2": {"1": {"6": {"0": {"docs": {"sqlglot.dataframe.sql.Column.over": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"1": {"8": {"7": {"6": {"8": {"docs": {"sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"6": {"4": {"4": {"3": {"2": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"0": {"6": {"0": {"4": {"8": {"0": {"docs": {"sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"7": {"0": {"5": {"1": {"2": {"docs": {"sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"3": {"5": {"9": {"0": {"4": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"9": {"3": {"2": {"8": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"2": {"2": {"9": {"9": {"2": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"0": {"8": {"8": {"0": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"3": {"3": {"6": {"8": {"0": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"3": {"7": {"2": {"8": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"0": {"4": {"8": {"9": {"4": {"4": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"0": {"9": {"4": {"4": {"docs": 
{"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"5": {"6": {"4": {"8": {"0": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"3": {"9": {"3": {"4": {"4": {"docs": {"sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"6": {"0": {"4": {"6": {"4": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"3": {"9": {"1": {"6": {"8": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"8": {"4": {"7": {"0": {"5": {"6": {"docs": {"sqlglot.dataframe.sql.Column.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"2": {"9": {"6": {"2": {"4": {"0": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"4": {"4": {"0": {"8": {"6": {"4": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"5": {"6": {"5": {"1": {"6": {"8": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 3}, "2": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "3": {"9": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 2}, 
"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isin": {"tf": 2}, "sqlglot.dataframe.sql.Column.between": {"tf": 2}, "sqlglot.dataframe.sql.Column.over": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 2}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 2}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 2}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 2}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 2}, "sqlglot.generator.Generator.sep": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.seg": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.national_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties": {"tf": 2.449489742783178}, "sqlglot.generator.Generator.table_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 2}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.expressions": {"tf": 2}, "sqlglot.helper.csv": {"tf": 1.4142135623730951}, "sqlglot.lineage.Node.__init__": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 42}, "docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 2}, "6": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "8": {"0": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot.parse": {"tf": 10.099504938362077}, "sqlglot.parse_one": {"tf": 12.727922061357855}, "sqlglot.transpile": {"tf": 14}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 6}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 12.806248474865697}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 6}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 14.2828568570857}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 6.4031242374328485}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 5.830951894845301}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 6.324555320336759}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 6.48074069840786}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 8.426149773176359}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 8.426149773176359}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 6.324555320336759}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 6.324555320336759}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 12.041594578792296}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 9.9498743710662}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 9.9498743710662}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 7.14142842854285}, 
"sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 5.656854249492381}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 10.677078252031311}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 10}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 13.19090595827292}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 7.937253933193772}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 5.0990195135927845}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 8.12403840463596}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 6}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 8}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 9.327379053088816}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 6}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 6.782329983125268}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 9.16515138991168}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 8.602325267042627}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 7.0710678118654755}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 7.54983443527075}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 8.831760866327848}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 9.797958971132712}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 8.54400374531753}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 8.246211251235321}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 8.246211251235321}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 6.48074069840786}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 5.656854249492381}, "sqlglot.dataframe.sql.Column.copy": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.Column.sql": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.Column.alias": {"tf": 6}, "sqlglot.dataframe.sql.Column.asc": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.desc": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.when": {"tf": 7.937253933193772}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 6}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.cast": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 8}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 8}, 
"sqlglot.dataframe.sql.Column.rlike": {"tf": 6}, "sqlglot.dataframe.sql.Column.like": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.Column.substr": {"tf": 9.9498743710662}, "sqlglot.dataframe.sql.Column.isin": {"tf": 7.937253933193772}, "sqlglot.dataframe.sql.Column.between": {"tf": 8.660254037844387}, "sqlglot.dataframe.sql.Column.over": {"tf": 7.211102550927978}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 10.677078252031311}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 10.908712114635714}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 12.083045973594572}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 9}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 9}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 5.744562646538029}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 9}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 9}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 6}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 10}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 5.830951894845301}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 4.795831523312719}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 6.557438524302}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 7.615773105863909}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 7.745966692414834}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 5.744562646538029}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 9.273618495495704}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 7.615773105863909}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 10.099504938362077}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 
5.5677643628300215}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 5.744562646538029}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 5.0990195135927845}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 5.0990195135927845}, "sqlglot.dialects.dialect.rename_func": {"tf": 6.928203230275509}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.if_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 7.416198487095663}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 7.416198487095663}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 8.48528137423857}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 8.660254037844387}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 8}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 6.164414002968976}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 6.164414002968976}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 4.898979485566356}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.min_or_least": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.trim_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 4}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 9.539392014169456}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 4.47213595499958}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 8}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 5.291502622129181}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 5.744562646538029}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 
5.291502622129181}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 5.291502622129181}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 5.477225575051661}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 6.6332495807108}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 7.416198487095663}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 5.291502622129181}, "sqlglot.diff.Insert.__init__": {"tf": 4.47213595499958}, "sqlglot.diff.Remove.__init__": {"tf": 4.47213595499958}, "sqlglot.diff.Move.__init__": {"tf": 4.47213595499958}, "sqlglot.diff.Update.__init__": {"tf": 6.164414002968976}, "sqlglot.diff.Keep.__init__": {"tf": 6.164414002968976}, "sqlglot.diff.diff": {"tf": 12.96148139681572}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 5.656854249492381}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 12.727922061357855}, "sqlglot.errors.ParseError.__init__": {"tf": 6.48074069840786}, "sqlglot.errors.ParseError.new": {"tf": 13.038404810405298}, "sqlglot.errors.concat_messages": {"tf": 5.385164807134504}, 
"sqlglot.errors.merge_errors": {"tf": 6.48074069840786}, "sqlglot.executor.execute": {"tf": 12.727922061357855}, "sqlglot.executor.context.Context.__init__": {"tf": 7.416198487095663}, "sqlglot.executor.context.Context.eval": {"tf": 3.7416573867739413}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 3.7416573867739413}, "sqlglot.executor.context.Context.add_columns": {"tf": 4.69041575982343}, "sqlglot.executor.context.Context.table_iter": {"tf": 7.681145747868608}, "sqlglot.executor.context.Context.filter": {"tf": 4}, "sqlglot.executor.context.Context.sort": {"tf": 4}, "sqlglot.executor.context.Context.set_row": {"tf": 4.47213595499958}, "sqlglot.executor.context.Context.set_index": {"tf": 4.47213595499958}, "sqlglot.executor.context.Context.set_range": {"tf": 5.291502622129181}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 2.8284271247461903}, "sqlglot.executor.env.filter_nulls": {"tf": 4.242640687119285}, "sqlglot.executor.env.null_if_any": {"tf": 3.4641016151377544}, "sqlglot.executor.env.str_position": {"tf": 4.69041575982343}, "sqlglot.executor.env.substring": {"tf": 5.0990195135927845}, "sqlglot.executor.env.cast": {"tf": 3.7416573867739413}, "sqlglot.executor.env.ordered": {"tf": 4.242640687119285}, "sqlglot.executor.env.interval": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 4.47213595499958}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 3.1622776601683795}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 4.795831523312719}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 4.69041575982343}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 4.242640687119285}, "sqlglot.executor.table.Table.__init__": {"tf": 4.898979485566356}, "sqlglot.executor.table.Table.add_columns": {"tf": 4.69041575982343}, "sqlglot.executor.table.Table.append": {"tf": 3.7416573867739413}, "sqlglot.executor.table.Table.pop": {"tf": 3.1622776601683795}, "sqlglot.executor.table.TableIter.__init__": {"tf": 2.8284271247461903}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 2.8284271247461903}, "sqlglot.executor.table.RowReader.__init__": {"tf": 4}, "sqlglot.executor.table.ensure_tables": {"tf": 5.744562646538029}, "sqlglot.expressions.Expression.__init__": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.text": {"tf": 4}, "sqlglot.expressions.Expression.copy": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.add_comments": {"tf": 5.291502622129181}, "sqlglot.expressions.Expression.append": {"tf": 5.291502622129181}, "sqlglot.expressions.Expression.set": {"tf": 5.291502622129181}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 
5.744562646538029}, "sqlglot.expressions.Expression.find": {"tf": 7.0710678118654755}, "sqlglot.expressions.Expression.find_all": {"tf": 7.0710678118654755}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 6}, "sqlglot.expressions.Expression.root": {"tf": 4.47213595499958}, "sqlglot.expressions.Expression.walk": {"tf": 5.0990195135927845}, "sqlglot.expressions.Expression.dfs": {"tf": 5.830951894845301}, "sqlglot.expressions.Expression.bfs": {"tf": 4.242640687119285}, "sqlglot.expressions.Expression.unnest": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.unalias": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.flatten": {"tf": 4.242640687119285}, "sqlglot.expressions.Expression.sql": {"tf": 9.055385138137417}, "sqlglot.expressions.Expression.transform": {"tf": 5.830951894845301}, "sqlglot.expressions.Expression.replace": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.pop": {"tf": 4.47213595499958}, "sqlglot.expressions.Expression.assert_is": {"tf": 5.477225575051661}, "sqlglot.expressions.Expression.error_messages": {"tf": 6}, "sqlglot.expressions.Expression.dump": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.load": {"tf": 3.7416573867739413}, "sqlglot.expressions.Condition.and_": {"tf": 11.789826122551595}, "sqlglot.expressions.Condition.or_": {"tf": 11.789826122551595}, "sqlglot.expressions.Condition.not_": {"tf": 4.898979485566356}, "sqlglot.expressions.Condition.as_": {"tf": 12.12435565298214}, "sqlglot.expressions.Condition.isin": {"tf": 9.591663046625438}, "sqlglot.expressions.Condition.between": {"tf": 7.810249675906654}, "sqlglot.expressions.Condition.is_": {"tf": 6.928203230275509}, "sqlglot.expressions.Condition.like": {"tf": 6.928203230275509}, "sqlglot.expressions.Condition.ilike": {"tf": 6.928203230275509}, "sqlglot.expressions.Condition.eq": {"tf": 5.291502622129181}, "sqlglot.expressions.Condition.neq": {"tf": 5.291502622129181}, "sqlglot.expressions.Condition.rlike": {"tf": 6.928203230275509}, "sqlglot.expressions.Unionable.union": {"tf": 11.489125293076057}, "sqlglot.expressions.Unionable.intersect": {"tf": 11.489125293076057}, "sqlglot.expressions.Unionable.except_": {"tf": 11.489125293076057}, "sqlglot.expressions.Column.to_dot": {"tf": 4.47213595499958}, "sqlglot.expressions.Delete.delete": {"tf": 11.489125293076057}, "sqlglot.expressions.Delete.where": {"tf": 12.409673645990857}, "sqlglot.expressions.Delete.returning": {"tf": 11.489125293076057}, "sqlglot.expressions.Insert.with_": {"tf": 13.96424004376894}, "sqlglot.expressions.Literal.number": {"tf": 4.898979485566356}, "sqlglot.expressions.Literal.string": {"tf": 4.898979485566356}, "sqlglot.expressions.Join.on": {"tf": 12.409673645990857}, "sqlglot.expressions.Join.using": {"tf": 12.409673645990857}, "sqlglot.expressions.Properties.from_dict": {"tf": 5.291502622129181}, "sqlglot.expressions.Tuple.isin": {"tf": 9.591663046625438}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 8.602325267042627}, "sqlglot.expressions.Subqueryable.limit": {"tf": 11.704699910719626}, "sqlglot.expressions.Subqueryable.with_": {"tf": 13.96424004376894}, "sqlglot.expressions.Union.limit": {"tf": 11.704699910719626}, "sqlglot.expressions.Union.select": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.from_": {"tf": 11.489125293076057}, "sqlglot.expressions.Select.group_by": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.order_by": {"tf": 12.409673645990857}, 
"sqlglot.expressions.Select.sort_by": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.cluster_by": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.limit": {"tf": 11.704699910719626}, "sqlglot.expressions.Select.offset": {"tf": 11.704699910719626}, "sqlglot.expressions.Select.select": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.lateral": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.join": {"tf": 17.46424919657298}, "sqlglot.expressions.Select.where": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.having": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.window": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.qualify": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.distinct": {"tf": 9.219544457292887}, "sqlglot.expressions.Select.ctas": {"tf": 12.328828005937952}, "sqlglot.expressions.Select.lock": {"tf": 7.14142842854285}, "sqlglot.expressions.Subquery.unnest": {"tf": 3.1622776601683795}, "sqlglot.expressions.DataType.build": {"tf": 11.357816691600547}, "sqlglot.expressions.DataType.is_type": {"tf": 7.3484692283495345}, "sqlglot.expressions.Dot.build": {"tf": 6.557438524302}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func.from_arg_list": {"tf": 3.7416573867739413}, "sqlglot.expressions.Func.sql_names": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func.sql_name": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 3.1622776601683795}, "sqlglot.expressions.Case.when": {"tf": 9.848857801796104}, "sqlglot.expressions.Case.else_": {"tf": 8.366600265340756}, "sqlglot.expressions.Cast.is_type": {"tf": 7.3484692283495345}, "sqlglot.expressions.maybe_parse": {"tf": 14.866068747318506}, "sqlglot.expressions.union": {"tf": 12.409673645990857}, "sqlglot.expressions.intersect": {"tf": 12.409673645990857}, "sqlglot.expressions.except_": {"tf": 12.409673645990857}, "sqlglot.expressions.select": {"tf": 10.677078252031311}, "sqlglot.expressions.from_": {"tf": 10.583005244258363}, "sqlglot.expressions.update": {"tf": 13.892443989449804}, "sqlglot.expressions.delete": {"tf": 13.711309200802088}, "sqlglot.expressions.insert": {"tf": 14.594519519326424}, "sqlglot.expressions.condition": {"tf": 11.269427669584644}, "sqlglot.expressions.and_": {"tf": 11.575836902790225}, "sqlglot.expressions.or_": {"tf": 11.575836902790225}, "sqlglot.expressions.not_": {"tf": 11.269427669584644}, "sqlglot.expressions.paren": {"tf": 7.615773105863909}, "sqlglot.expressions.to_identifier": {"tf": 5.0990195135927845}, "sqlglot.expressions.to_interval": {"tf": 6.164414002968976}, "sqlglot.expressions.to_table": {"tf": 11.045361017187261}, "sqlglot.expressions.to_column": {"tf": 6.708203932499369}, "sqlglot.expressions.alias_": {"tf": 14.142135623730951}, "sqlglot.expressions.subquery": {"tf": 12.24744871391589}, "sqlglot.expressions.column": {"tf": 13.114877048604}, "sqlglot.expressions.cast": {"tf": 9.433981132056603}, "sqlglot.expressions.table_": {"tf": 13.114877048604}, "sqlglot.expressions.values": {"tf": 10.44030650891055}, "sqlglot.expressions.var": {"tf": 6.928203230275509}, "sqlglot.expressions.rename_table": {"tf": 7.745966692414834}, "sqlglot.expressions.convert": {"tf": 6.164414002968976}, "sqlglot.expressions.replace_children": {"tf": 6.928203230275509}, "sqlglot.expressions.column_table_names": {"tf": 5.385164807134504}, "sqlglot.expressions.table_name": {"tf": 5.385164807134504}, "sqlglot.expressions.replace_tables": {"tf": 6.164414002968976}, 
"sqlglot.expressions.replace_placeholders": {"tf": 6.855654600401044}, "sqlglot.expressions.expand": {"tf": 8.660254037844387}, "sqlglot.expressions.func": {"tf": 10.04987562112089}, "sqlglot.expressions.true": {"tf": 4.123105625617661}, "sqlglot.expressions.false": {"tf": 4.123105625617661}, "sqlglot.expressions.null": {"tf": 4.123105625617661}, "sqlglot.generator.Generator.__init__": {"tf": 17.52141546793523}, "sqlglot.generator.Generator.generate": {"tf": 7.937253933193772}, "sqlglot.generator.Generator.unsupported": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.sep": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.seg": {"tf": 6}, "sqlglot.generator.Generator.pad_comment": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.maybe_comment": {"tf": 8.54400374531753}, "sqlglot.generator.Generator.wrap": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.no_identify": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.normalize_func": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.indent": {"tf": 9.327379053088816}, "sqlglot.generator.Generator.sql": {"tf": 8.94427190999916}, "sqlglot.generator.Generator.uncache_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.cache_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.characterset_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.column_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.columndef_sql": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 4.242640687119285}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 5.477225575051661}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.create_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.clone_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.describe_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 6}, "sqlglot.generator.Generator.with_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.cte_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.datatype_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.directory_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.delete_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.drop_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.except_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.except_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.fetch_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.filter_sql": {"tf": 
5.291502622129181}, "sqlglot.generator.Generator.hint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.index_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.identifier_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.national_sql": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.partition_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.properties_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.root_properties": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.properties": {"tf": 9.797958971132712}, "sqlglot.generator.Generator.with_properties": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.locate_properties": {"tf": 7.937253933193772}, "sqlglot.generator.Generator.property_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.insert_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.intersect_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.intersect_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.introducer_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.returning_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.table_sql": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 7.615773105863909}, "sqlglot.generator.Generator.pivot_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.tuple_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.update_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.values_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.var_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.into_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.from_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.group_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.having_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.join_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lambda_sql": {"tf": 6.855654600401044}, "sqlglot.generator.Generator.lateral_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.limit_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.offset_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.setitem_sql": {"tf": 5.291502622129181}, 
"sqlglot.generator.Generator.set_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.pragma_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lock_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.literal_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.null_sql": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.boolean_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.order_sql": {"tf": 6.48074069840786}, "sqlglot.generator.Generator.cluster_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.distribute_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.sort_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ordered_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.query_modifiers": {"tf": 6.164414002968976}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.select_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.schema_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.star_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.parameter_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.subquery_sql": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.qualify_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.union_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.union_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.unnest_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.where_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.window_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 6.557438524302}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.between_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bracket_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.all_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.any_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.exists_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.case_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.constraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.extract_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.trim_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.concat_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.check_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.if_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 5.291502622129181}, 
"sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.openjson_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.in_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.interval_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.return_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.reference_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.paren_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.neg_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.not_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.alias_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.aliases_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.add_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.and_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.connector_sql": {"tf": 6}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.cast_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.collate_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.command_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.comment_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.transaction_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.commit_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.rollback_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.renametable_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.altertable_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.distinct_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.div_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.distance_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dot_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.eq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.escape_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.glob_sql": {"tf": 5.291502622129181}, 
"sqlglot.generator.Generator.gt_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.gte_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ilike_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.is_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.like_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.likeany_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.similarto_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lt_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lte_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mod_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mul_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.neq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.or_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.slice_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.sub_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.trycast_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.use_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.binary": {"tf": 6}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.func": {"tf": 7.483314773547883}, "sqlglot.generator.Generator.format_args": {"tf": 6.708203932499369}, "sqlglot.generator.Generator.text_width": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.format_time": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.expressions": {"tf": 12.165525060596439}, "sqlglot.generator.Generator.op_expressions": {"tf": 7.3484692283495345}, "sqlglot.generator.Generator.naked_property": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.set_operation": {"tf": 6}, "sqlglot.generator.Generator.tag_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.token_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.when_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.merge_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.tochar_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.cached_generator": {"tf": 7.3484692283495345}, "sqlglot.helper.seq_get": {"tf": 6.164414002968976}, "sqlglot.helper.ensure_list": {"tf": 3.1622776601683795}, "sqlglot.helper.ensure_collection": {"tf": 3.1622776601683795}, "sqlglot.helper.csv": {"tf": 5.830951894845301}, "sqlglot.helper.subclasses": {"tf": 9.38083151964686}, "sqlglot.helper.apply_index_offset": {"tf": 7.937253933193772}, "sqlglot.helper.camel_to_snake_case": {"tf": 4}, "sqlglot.helper.while_changing": {"tf": 7.483314773547883}, "sqlglot.helper.tsort": {"tf": 6.324555320336759}, "sqlglot.helper.open_file": {"tf": 3.872983346207417}, "sqlglot.helper.csv_reader": {"tf": 4.898979485566356}, "sqlglot.helper.find_new_name": {"tf": 5.385164807134504}, 
"sqlglot.helper.name_sequence": {"tf": 4.795831523312719}, "sqlglot.helper.object_to_dict": {"tf": 4.69041575982343}, "sqlglot.helper.split_num_words": {"tf": 7.615773105863909}, "sqlglot.helper.is_iterable": {"tf": 4}, "sqlglot.helper.flatten": {"tf": 6.082762530298219}, "sqlglot.helper.dict_depth": {"tf": 4}, "sqlglot.helper.first": {"tf": 5}, "sqlglot.helper.case_sensitive": {"tf": 8.54400374531753}, "sqlglot.helper.should_identify": {"tf": 9.643650760992955}, "sqlglot.lineage.Node.__init__": {"tf": 9.899494936611665}, "sqlglot.lineage.Node.walk": {"tf": 5}, "sqlglot.lineage.Node.to_html": {"tf": 5.0990195135927845}, "sqlglot.lineage.lineage": {"tf": 14.422205101855956}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 10.44030650891055}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 5.830951894845301}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 5.291502622129181}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 3.7416573867739413}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 5.744562646538029}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 5.656854249492381}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 5.656854249492381}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 5.744562646538029}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 5.744562646538029}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3.1622776601683795}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 4.242640687119285}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 4.242640687119285}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 4.242640687119285}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 4.242640687119285}, "sqlglot.optimizer.normalize.normalize": {"tf": 7.280109889280518}, "sqlglot.optimizer.normalize.normalized": {"tf": 4.242640687119285}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 4.242640687119285}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 4.69041575982343}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 9.1104335791443}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 3.1622776601683795}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 3.1622776601683795}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 3.1622776601683795}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 3.7416573867739413}, "sqlglot.optimizer.optimizer.optimize": {"tf": 21.071307505705477}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 3.1622776601683795}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 3.7416573867739413}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 5.0990195135927845}, "sqlglot.optimizer.qualify.qualify": {"tf": 
16.822603841260722}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 9.486832980505138}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 3.1622776601683795}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 9.899494936611665}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 5.0990195135927845}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 5.744562646538029}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 4.69041575982343}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 9.433981132056603}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 8.06225774829855}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 5.656854249492381}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.find": {"tf": 4.898979485566356}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 4.898979485566356}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 3.7416573867739413}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 3.7416573867739413}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 6.48074069840786}, "sqlglot.optimizer.scope.build_scope": {"tf": 6.48074069840786}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.simplify": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 5.744562646538029}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.flatten": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 4.69041575982343}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.always_true": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.is_complement": {"tf": 3.7416573867739413}, "sqlglot.optimizer.simplify.is_false": {"tf": 4.898979485566356}, "sqlglot.optimizer.simplify.is_null": {"tf": 4.898979485566356}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.extract_date": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.date_literal": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 3.1622776601683795}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 3.1622776601683795}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 4.242640687119285}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 4.69041575982343}, "sqlglot.parser.parse_var_map": {"tf": 6.082762530298219}, "sqlglot.parser.parse_like": {"tf": 4.898979485566356}, "sqlglot.parser.binary_range_parser": {"tf": 
8.660254037844387}, "sqlglot.parser.Parser.__init__": {"tf": 11.269427669584644}, "sqlglot.parser.Parser.reset": {"tf": 3.1622776601683795}, "sqlglot.parser.Parser.parse": {"tf": 8.426149773176359}, "sqlglot.parser.Parser.parse_into": {"tf": 11.313708498984761}, "sqlglot.parser.Parser.check_errors": {"tf": 3.4641016151377544}, "sqlglot.parser.Parser.raise_error": {"tf": 6.855654600401044}, "sqlglot.parser.Parser.expression": {"tf": 7.810249675906654}, "sqlglot.parser.Parser.validate_expression": {"tf": 7.0710678118654755}, "sqlglot.planner.Plan.__init__": {"tf": 4.47213595499958}, "sqlglot.planner.Step.from_expression": {"tf": 8.602325267042627}, "sqlglot.planner.Step.add_dependency": {"tf": 5.291502622129181}, "sqlglot.planner.Step.to_s": {"tf": 5.0990195135927845}, "sqlglot.planner.Scan.from_expression": {"tf": 8.602325267042627}, "sqlglot.planner.Join.from_joins": {"tf": 8.888194417315589}, "sqlglot.planner.SetOperation.__init__": {"tf": 8.306623862918075}, "sqlglot.planner.SetOperation.from_expression": {"tf": 8.602325267042627}, "sqlglot.schema.Schema.add_table": {"tf": 12.36931687685298}, "sqlglot.schema.Schema.column_names": {"tf": 10.816653826391969}, "sqlglot.schema.Schema.get_column_type": {"tf": 11.045361017187261}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 4.795831523312719}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 5.744562646538029}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 8.48528137423857}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 7.810249675906654}, "sqlglot.schema.MappingSchema.__init__": {"tf": 11}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 6.164414002968976}, "sqlglot.schema.MappingSchema.copy": {"tf": 5.0990195135927845}, "sqlglot.schema.MappingSchema.add_table": {"tf": 12.36931687685298}, "sqlglot.schema.MappingSchema.column_names": {"tf": 10.816653826391969}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 11.045361017187261}, "sqlglot.schema.ensure_schema": {"tf": 7.681145747868608}, "sqlglot.schema.ensure_column_mapping": {"tf": 7.615773105863909}, "sqlglot.schema.flatten_schema": {"tf": 7.54983443527075}, "sqlglot.schema.nested_get": {"tf": 7.615773105863909}, "sqlglot.schema.nested_set": {"tf": 6.082762530298219}, "sqlglot.serde.dump": {"tf": 11.045361017187261}, "sqlglot.serde.load": {"tf": 11.045361017187261}, "sqlglot.time.format_time": {"tf": 7.810249675906654}, "sqlglot.tokens.Token.__init__": {"tf": 10.535653752852738}, "sqlglot.tokens.Token.number": {"tf": 5.291502622129181}, "sqlglot.tokens.Token.string": {"tf": 5.291502622129181}, "sqlglot.tokens.Token.identifier": {"tf": 5.291502622129181}, "sqlglot.tokens.Token.var": {"tf": 5.291502622129181}, "sqlglot.tokens.Tokenizer.reset": {"tf": 3.4641016151377544}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 5.744562646538029}, "sqlglot.transforms.unalias_group": {"tf": 5.744562646538029}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 5.744562646538029}, "sqlglot.transforms.eliminate_qualify": {"tf": 5.744562646538029}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 5.744562646538029}, "sqlglot.transforms.unnest_to_explode": {"tf": 5.744562646538029}, "sqlglot.transforms.explode_to_unnest": {"tf": 5.744562646538029}, "sqlglot.transforms.remove_target_from_merge": {"tf": 5.744562646538029}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 5.744562646538029}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 5.744562646538029}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 
5.744562646538029}, "sqlglot.transforms.preprocess": {"tf": 8.774964387392123}, "sqlglot.trie.new_trie": {"tf": 6.708203932499369}, "sqlglot.trie.in_trie": {"tf": 6.244997998398398}}, "df": 782, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 108, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 1.7320508075688772}, "sqlglot.parse_one": {"tf": 2.23606797749979}, "sqlglot.transpile": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.4142135623730951}, 
"sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, 
"sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.if_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.dialect.str_position_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 3}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 3}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor.execute": {"tf": 2.23606797749979}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Condition.or_": {"tf": 2}, "sqlglot.expressions.Condition.as_": {"tf": 2}, "sqlglot.expressions.Condition.isin": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.like": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.ilike": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 2}, "sqlglot.expressions.Delete.where": {"tf": 2}, "sqlglot.expressions.Delete.returning": {"tf": 2}, "sqlglot.expressions.Insert.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 2}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.limit": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Union.select": {"tf": 2}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2}, "sqlglot.expressions.Select.lateral": {"tf": 2}, 
"sqlglot.expressions.Select.join": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.where": {"tf": 2}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Select.window": {"tf": 2}, "sqlglot.expressions.Select.qualify": {"tf": 2}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 2}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 2.23606797749979}, "sqlglot.expressions.DataType.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.Dot.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.Case.when": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case.else_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2.449489742783178}, "sqlglot.expressions.delete": {"tf": 2.449489742783178}, "sqlglot.expressions.insert": {"tf": 2.449489742783178}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 2}, "sqlglot.expressions.or_": {"tf": 2}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 2}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 2.23606797749979}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.column": {"tf": 2.23606797749979}, "sqlglot.expressions.cast": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 2.23606797749979}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1.4142135623730951}, "sqlglot.expressions.rename_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": 
{"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, 
"sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": 
{"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, 
"sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.Node.__init__": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 2.6457513110645907}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1.4142135623730951}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 2}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.Join.from_joins": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.7320508075688772}, 
"sqlglot.schema.Schema.add_table": {"tf": 2}, "sqlglot.schema.Schema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.get_column_type": {"tf": 2.23606797749979}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 2.23606797749979}, "sqlglot.schema.ensure_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1.4142135623730951}, "sqlglot.transforms.explode_to_unnest": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1.4142135623730951}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1.4142135623730951}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 2}}, "df": 585}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 2}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, 
"sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 2}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 2}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, 
"sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1.4142135623730951}, "sqlglot.errors.ParseError.new": {"tf": 2.449489742783178}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1.4142135623730951}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.insert": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 2}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.seg": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pad_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.wrap": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.no_identify": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.normalize_func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, 
"sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 2}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, 
"sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, 
"sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 
1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.format_args": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 2.23606797749979}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1.4142135623730951}, "sqlglot.helper.csv": {"tf": 1.7320508075688772}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.7320508075688772}, "sqlglot.helper.name_sequence": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.helper.case_sensitive": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.__init__": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 2.23606797749979}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, 
"sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 2}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 515, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}}, "df": 2}}}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 3}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}}, "df": 4}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.star_sql": {"tf": 1}}, "df": 1, "t": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 2.6457513110645907}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 10, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.parser.parse_var_map": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, 
"sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Join.from_joins": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 12}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": 
{"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": 
{"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, 
"sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, 
"sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, 
"sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, 
"sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 547}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, 
"sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1.4142135623730951}}, "df": 25, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 14}}}}}}, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1.4142135623730951}}, "df": 2}}, "p": {"docs": {"sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 13}, "t": {"docs": {"sqlglot.generator.Generator.set_sql": 
{"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.generator.Generator.setitem_sql": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 2.23606797749979}, "sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 18}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}}, "df": 9, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}}, "df": 1}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 4, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 2}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.generator.Generator.sub_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 7}}, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.env.str_position": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 3}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.generator.Generator.properties": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}}, "df": 1}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}}, "df": 10, "s": {"docs": {"sqlglot.expressions.expand": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 6}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.generator.Generator.sort_sql": {"tf": 1}}, "df": 1}}}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.generator.Generator.similarto_sql": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.slice_sql": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 5, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 3}}}}}}, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.Column.rlike": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Condition.rlike": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.return_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}}, "df": 2}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 3}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Insert.with_": {"tf": 1}, 
"sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 2}}}}}}}, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.reference_sql": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.renametable_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}}, "df": 2, "n": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 3, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.rawstring_sql": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor.table.Table.__init__": {"tf": 1}}, "df": 1}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.rollback_sql": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}}, "df": 6}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 4}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, 
"sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1.4142135623730951}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.insert": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1.7320508075688772}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.func": 
{"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 132, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}}, "df": 3}}}}}}, "t": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}}, "df": 2}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 7}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.uncache_sql": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}}, "df": 6}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Select.join": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.generator.Generator.use_sql": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "d": {"docs": {"sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 2}, "sqlglot.parse_one": {"tf": 2}, "sqlglot.transpile": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 2.6457513110645907}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 2.23606797749979}, "sqlglot.executor.execute": {"tf": 2}, "sqlglot.expressions.Expression.sql": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.and_": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.as_": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.union": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.where": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.returning": {"tf": 2.23606797749979}, "sqlglot.expressions.Insert.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Join.on": {"tf": 2.23606797749979}, "sqlglot.expressions.Join.using": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.select": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.group_by": {"tf": 2.23606797749979}, 
"sqlglot.expressions.Select.order_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.sort_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.offset": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.select": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.lateral": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.having": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.window": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.qualify": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.ctas": {"tf": 2.23606797749979}, "sqlglot.expressions.DataType.build": {"tf": 2.23606797749979}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 2.23606797749979}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.insert": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 2.23606797749979}, "sqlglot.expressions.and_": {"tf": 2.23606797749979}, "sqlglot.expressions.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.not_": {"tf": 2.23606797749979}, "sqlglot.expressions.to_table": {"tf": 2.23606797749979}, "sqlglot.expressions.alias_": {"tf": 2.23606797749979}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.helper.case_sensitive": {"tf": 2.23606797749979}, "sqlglot.helper.should_identify": {"tf": 2.23606797749979}, "sqlglot.lineage.lineage": {"tf": 2.23606797749979}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 2.23606797749979}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.add_table": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.column_names": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.get_column_type": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.__init__": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.column_names": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 2.23606797749979}}, "df": 74, "s": {"docs": {"sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.helper.case_sensitive": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": 
{"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 71}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1.4142135623730951}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 47, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.dictrange_sql": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, 
"r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 9}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.distribute_sql": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 3}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 2.23606797749979}}, "df": 2}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.directory_sql": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {"sqlglot.generator.Generator.div_sql": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 2.449489742783178}, 
"sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 2}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 
1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 89, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 3}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 14, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}}, "df": 1}}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "e": {"docs": {"sqlglot.optimizer.simplify.date_literal": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": 
{"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}, "f": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 4}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}}, "df": 2}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}}, "df": 5}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.planner.Step.add_dependency": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}}, "df": 3}, "w": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}}, "df": 1}}}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.build": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}}, "df": 2}}}}}, "b": {"docs": {"sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 5}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.drop_sql": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.droppartition_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.generator.Generator.dpipe_sql": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 4}}}, "t": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1.7320508075688772}, "sqlglot.helper.first": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 5, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, 
"sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 2.23606797749979}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 93, "s": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 11}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": 
{"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 84}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}}, "df": 2}, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 5}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}}, "df": 3}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 26, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, 
"sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 5}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.tablealias_sql": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 4}}}}, "g": {"docs": {"sqlglot.generator.Generator.tag_sql": {"tf": 1}}, "df": 1}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 15}}}}, "h": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 6}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Case.when": {"tf": 1}}, "df": 1}}}, "o": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}}, "df": 6, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, 
"sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 11, "s": {"docs": {"sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 11}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 2}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.tochar_sql": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {"sqlglot.generator.Generator.national_sql": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, 
"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 2}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 4.242640687119285}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1.7320508075688772}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 2}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.7320508075688772}, 
"sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 152, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, 
"sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 97}}}}}}, "t": {"docs": {"sqlglot.expressions.not_": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}}, "df": 2, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}}, "df": 3}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1}}, "df": 8}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 28}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.national_sql": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 4}, "q": {"docs": {"sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}}, "df": 2}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 2, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "g": {"docs": {"sqlglot.generator.Generator.neg_sql": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}}, "df": 2}}}}}}}}}}, "b": {"docs": 
{}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 7, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, 
"sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 64}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1.7320508075688772}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.seq_get": 
{"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 78}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2, "r": {"docs": {"sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.openjson_sql": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 
1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}}, "df": 17}}}}, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.expressions.Select.distinct": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 5}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.onconflict_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.overlaps_sql": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 6}}}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {"sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 4}}, "r": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}}, "df": 2, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.order_sql": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.ordered_sql": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 3}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1.4142135623730951}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": 
{"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1.7320508075688772}, "sqlglot.serde.load": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 72}}, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 5, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 1}}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.likeany_sql": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}}, "df": 4}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.limit_sql": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 5}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}}, "df": 2}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 6}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 3.7416573867739413}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 25, "e": {"docs": {"sqlglot.generator.Generator.lte_sql": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 3}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.expressions.Condition.between": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.locate_properties": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {"sqlglot.generator.Generator.lock_sql": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "a": 
{"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.generator.Generator.loaddata_sql": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}}, "df": 15, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 3, "r": {"docs": {"sqlglot.parser.binary_range_parser": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 2}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, 
"sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Remove.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Move.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Update.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 2}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 
1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1.4142135623730951}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.7320508075688772}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.wrap": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, 
"sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.select_sql": 
{"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, 
"sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.__init__": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, 
"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Plan.__init__": {"tf": 
1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.unnest_to_explode": {"tf": 1.7320508075688772}, "sqlglot.transforms.explode_to_unnest": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1.7320508075688772}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1.7320508075688772}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}}, "df": 470, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 
1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 2}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.isin": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.like": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.ilike": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.using": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.window": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.qualify": 
{"tf": 1.7320508075688772}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.Dot.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case.when": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case.else_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.insert": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 2.23606797749979}, "sqlglot.expressions.cast": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 2.23606797749979}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1.4142135623730951}, "sqlglot.expressions.rename_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": 
{"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, 
"sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": 
{"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, 
"sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1.4142135623730951}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1.4142135623730951}, "sqlglot.transforms.explode_to_unnest": 
{"tf": 1.4142135623730951}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1.4142135623730951}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1.4142135623730951}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}}, "df": 469}}}}}}}, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.generator.Generator.exists_sql": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}}, "df": 5}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.table.ensure_tables": {"tf": 1}}, "df": 4}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.extract_sql": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}}, "df": 6}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 3}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, 
"sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 2.449489742783178}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 8}, "v": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}}, "df": 2}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor.env.filter_nulls": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {"sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}}, "df": 2}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}}, "df": 3, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1.4142135623730951}, "sqlglot.errors.ParseError.new": {"tf": 1.4142135623730951}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 2}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 39, "o": {"docs": 
{"sqlglot.parse_one": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}}, "df": 5}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}}, "df": 5}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}}, "df": 4}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.introducer_sql": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.generator.Generator.intdiv_sql": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}}, "df": 5}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 5}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}}, "df": 3}}}}, "d": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 
1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}}, "df": 19, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 2}, "sqlglot.generator.Generator.__init__": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}}, "df": 10, "s": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 3}}}}, "y": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}}, "df": 4}}}}}}}, "t": {"docs": {"sqlglot.helper.first": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}}, "df": 5}}}}}}}, "f": {"docs": {"sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}}, "df": 3}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "n": {"docs": {}, 
"df": 0, "y": {"docs": {"sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {"sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}}, "df": 2, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 4, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse_one": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 6}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.collate_sql": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}}, "df": 18}, "u": {"docs": {}, 
"df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.when": {"tf": 2}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 2}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 2}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.substr": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 50, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}}, "df": 2}}}, "s": {"docs": 
{"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 8}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.columnposition_sql": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 10}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 11}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.constraint_sql": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.concat_sql": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.connector_sql": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, 
"y": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}}, "df": 50}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 5}}}}, "a": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1, "n": 
{"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.command_sql": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.commit_sql": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.eval": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor.context.Context.eval_tuple": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, 
"sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 30}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 5, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.clone_sql": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.cluster_sql": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 17}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}}, "df": 5}, "e": {"docs": {"sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}}, "df": 3}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 5}}}}}, "n": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, 
"sqlglot.generator.cached_generator": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}}, "df": 3}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.characterset_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.check_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.properties": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}}, "df": 12, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": 
{"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}}, "df": 9}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.withingroup_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}}, "df": 3}}, "n": {"docs": {"sqlglot.generator.Generator.when_sql": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 2, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, 
"sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, 
"sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 97, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}}, "df": 3}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bracket_sql": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {"sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.bytestring_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.bitstring_sql": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 
0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "x": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.binary": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}}, "df": 19}}}}}}}, "p": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1.7320508075688772}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, 
"sqlglot.time.format_time": {"tf": 1}}, "df": 9, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}}, "df": 2}}}}}}}}}}}, "x": {"docs": {"sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 5}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "d": 
{"docs": {"sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}}, "df": 2, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 2}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 3}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.mul_sql": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 3.7416573867739413}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 26, "e": {"docs": {"sqlglot.generator.Generator.gte_sql": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}}, 
"df": 1}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.if_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 33}}, "e": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, 
"df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.generator.Generator.glob_sql": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 7}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 2.449489742783178}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 24}}, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}}, "df": 1}}}}}}, "u": 
{"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 2, "c": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}}, "df": 7, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 3.7416573867739413}}, "df": 1, "s": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}}, "df": 2}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.filter_sql": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}, "l": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.fetch_sql": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 18, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 4}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {"sqlglot.expressions.var": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 3, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 4}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.parameter_sql": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.parser": {"tf": 1.4142135623730951}, "sqlglot.parser.binary_range_parser": {"tf": 1.4142135623730951}}, "df": 2}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.partition_sql": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.paren": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}}, "df": 2, "t": {"docs": {"sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 4}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 3}}, "d": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.from_dict": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}}, "df": 12}}}, "y": {"docs": {"sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 3}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, 
"sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 3}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}}, "df": 3}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.generator.Generator.pragma_sql": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}}, "df": 2}}}}}}}}, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Join.from_joins": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 5}}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.env.str_position": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.hint_sql": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "h": 
{"docs": {"sqlglot.expressions.Condition.between": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.hexstring_sql": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.having_sql": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 2}}}}}}}}, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.kwarg_sql": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}}, "df": 34}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}}, "df": 3}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff.diff": {"tf": 
1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}, "y": {"docs": {"sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 2}, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}}, "df": 6, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.7320508075688772}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 31}, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}}, "df": 2}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": 
{"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}}, "df": 2}}}, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.all_sql": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 17, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.aliases_sql": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}}, "df": 2}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, 
"sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 20}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}}, "df": 17}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 3.7416573867739413}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.attimezone_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.add_sql": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, 
"df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 9, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.joinhint_sql": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 2}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "x": {"2": {"7": {"docs": {"sqlglot.helper.open_file": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, 
"sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}}, "df": 5}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "bases": {"root": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 85, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 6}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"2": {"docs": {"sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}}, "df": 4}, "docs": {"sqlglot.dialects.databricks.Databricks": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.SetOperation": {"tf": 1}}, "df": 5}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, 
"sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.All": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.expressions.Exists": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python": {"tf": 1.4142135623730951}}, "df": 15, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}}, "df": 36}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}}, "df": 3}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}}, "df": 14}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}}, "df": 18}}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, 
"sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}}, "df": 17}}}}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}}, "df": 19}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.trino.Trino": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1.4142135623730951}}, "df": 3}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SubqueryPredicate": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, 
"sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}}, "df": 18}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AlgorithmProperty": {"tf": 1}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1}, "sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.expressions.CollateProperty": {"tf": 1}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.expressions.DefinerProperty": {"tf": 1}, "sqlglot.expressions.DistKeyProperty": {"tf": 1}, "sqlglot.expressions.DistStyleProperty": {"tf": 1}, "sqlglot.expressions.EngineProperty": {"tf": 1}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1}, "sqlglot.expressions.ExternalProperty": {"tf": 1}, "sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.expressions.FileFormatProperty": {"tf": 1}, "sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.expressions.LanguageProperty": {"tf": 1}, "sqlglot.expressions.DictProperty": {"tf": 1}, "sqlglot.expressions.DictSubProperty": {"tf": 1}, "sqlglot.expressions.DictRange": {"tf": 1}, "sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.expressions.LocationProperty": {"tf": 1}, "sqlglot.expressions.LockingProperty": {"tf": 1}, "sqlglot.expressions.LogProperty": {"tf": 1}, "sqlglot.expressions.MaterializedProperty": {"tf": 1}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1}, "sqlglot.expressions.OnCommitProperty": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}, "sqlglot.expressions.RowFormatProperty": {"tf": 1}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1}, "sqlglot.expressions.SerdeProperties": {"tf": 1}, "sqlglot.expressions.SetProperty": {"tf": 1}, "sqlglot.expressions.SettingsProperty": {"tf": 1}, "sqlglot.expressions.SortKeyProperty": {"tf": 1}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1}, "sqlglot.expressions.StabilityProperty": {"tf": 1}, "sqlglot.expressions.TemporaryProperty": {"tf": 1}, "sqlglot.expressions.TransientProperty": {"tf": 1}, "sqlglot.expressions.VolatileProperty": {"tf": 1}, "sqlglot.expressions.WithDataProperty": {"tf": 1}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1}}, "df": 47}}}}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}}, "df": 21}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "~": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 1}}}}}}}}}}, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Add": {"tf": 1}, "sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.expressions.Div": {"tf": 1}, "sqlglot.expressions.Overlaps": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.expressions.ArrayOverlaps": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayContained": {"tf": 1}, "sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.expressions.JSONBContains": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}}, "df": 41}}}}}}, "e": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1.4142135623730951}}, "df": 3}}}, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.DerivedTable": {"tf": 1}, "sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Clone": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.expressions.Pragma": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.expressions.ColumnPosition": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.expressions.SetTag": {"tf": 1}, "sqlglot.expressions.Comment": {"tf": 1}, "sqlglot.expressions.MergeTreeTTLAction": {"tf": 1}, "sqlglot.expressions.MergeTreeTTL": {"tf": 1}, "sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.expressions.Check": {"tf": 1}, "sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.OnConflict": {"tf": 1}, "sqlglot.expressions.Returning": {"tf": 1}, "sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.expressions.National": {"tf": 1}, "sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.expressions.Final": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.expressions.Property": {"tf": 
1}, "sqlglot.expressions.InputOutputFormat": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Return": {"tf": 1}, "sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.expressions.DataTypeSize": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.expressions.Command": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.expressions.JSONKeyValue": {"tf": 1}, "sqlglot.expressions.OpenJSONColumnDef": {"tf": 1}, "sqlglot.expressions.Use": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}}, "df": 103}}}}}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.spark2.Spark2": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1.4142135623730951}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 4}}}}}}, "m": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.starrocks.StarRocks": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 4}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "c": {"docs": {"sqlglot.schema.Schema": {"tf": 1.4142135623730951}}, "df": 1}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.ParameterizedAgg": {"tf": 1}, "sqlglot.expressions.Hll": {"tf": 1}, "sqlglot.expressions.ApproxDistinct": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}, "sqlglot.expressions.AnyValue": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.expressions.CountIf": {"tf": 1}, "sqlglot.expressions.LogicalOr": {"tf": 1}, "sqlglot.expressions.LogicalAnd": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}, "sqlglot.expressions.PercentileCont": {"tf": 1}, "sqlglot.expressions.PercentileDisc": {"tf": 1}, "sqlglot.expressions.Quantile": {"tf": 1}, "sqlglot.expressions.SetAgg": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}, "sqlglot.expressions.Stddev": {"tf": 1}, "sqlglot.expressions.StddevPop": {"tf": 1}, "sqlglot.expressions.StddevSamp": {"tf": 1}, "sqlglot.expressions.Variance": {"tf": 1}, "sqlglot.expressions.VariancePop": {"tf": 1}}, "df": 23}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.expressions.RawString": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}, "sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 13}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ConcatWs": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1}, "sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.expressions.CompressColumnConstraint": {"tf": 1}, "sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.expressions.TitleColumnConstraint": {"tf": 1}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1}, "sqlglot.expressions.PathColumnConstraint": {"tf": 1}}, "df": 19}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TryCast": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}}, "df": 3}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.Neg": {"tf": 1}}, "df": 4}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}}, "df": 3}}}}}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.AggFunc": {"tf": 1}, "sqlglot.expressions.Abs": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.expressions.ToChar": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}, "sqlglot.expressions.ArrayAll": {"tf": 1}, "sqlglot.expressions.ArrayAny": {"tf": 1}, "sqlglot.expressions.ArrayConcat": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayFilter": {"tf": 1}, "sqlglot.expressions.ArrayJoin": {"tf": 1}, "sqlglot.expressions.ArraySize": {"tf": 1}, "sqlglot.expressions.ArraySort": {"tf": 1}, "sqlglot.expressions.ArraySum": {"tf": 1}, "sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.CastToStrType": {"tf": 1}, "sqlglot.expressions.Ceil": {"tf": 1}, 
"sqlglot.expressions.Coalesce": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.expressions.CurrentDatetime": {"tf": 1}, "sqlglot.expressions.CurrentTime": {"tf": 1}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1}, "sqlglot.expressions.CurrentUser": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DateTrunc": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.DayOfWeek": {"tf": 1}, "sqlglot.expressions.DayOfMonth": {"tf": 1}, "sqlglot.expressions.DayOfYear": {"tf": 1}, "sqlglot.expressions.WeekOfYear": {"tf": 1}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.DateFromParts": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}, "sqlglot.expressions.DateToDateStr": {"tf": 1}, "sqlglot.expressions.DateToDi": {"tf": 1}, "sqlglot.expressions.Day": {"tf": 1}, "sqlglot.expressions.Decode": {"tf": 1}, "sqlglot.expressions.DiToDate": {"tf": 1}, "sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.expressions.Exp": {"tf": 1}, "sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.expressions.Floor": {"tf": 1}, "sqlglot.expressions.FromBase64": {"tf": 1}, "sqlglot.expressions.ToBase64": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}, "sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.expressions.IfNull": {"tf": 1}, "sqlglot.expressions.Initcap": {"tf": 1}, "sqlglot.expressions.JSONObject": {"tf": 1}, "sqlglot.expressions.OpenJSON": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.JSONFormat": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}, "sqlglot.expressions.Left": {"tf": 1}, "sqlglot.expressions.Right": {"tf": 1}, "sqlglot.expressions.Length": {"tf": 1}, "sqlglot.expressions.Levenshtein": {"tf": 1}, "sqlglot.expressions.Ln": {"tf": 1}, "sqlglot.expressions.Log": {"tf": 1}, "sqlglot.expressions.Log2": {"tf": 1}, "sqlglot.expressions.Log10": {"tf": 1}, "sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.expressions.StarMap": {"tf": 1}, "sqlglot.expressions.VarMap": {"tf": 1}, "sqlglot.expressions.MatchAgainst": {"tf": 1}, "sqlglot.expressions.MD5": {"tf": 1}, "sqlglot.expressions.Month": {"tf": 1}, "sqlglot.expressions.Nvl2": {"tf": 1}, "sqlglot.expressions.Posexplode": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}, "sqlglot.expressions.RangeN": {"tf": 1}, "sqlglot.expressions.ReadCSV": {"tf": 1}, "sqlglot.expressions.Reduce": {"tf": 1}, "sqlglot.expressions.RegexpExtract": {"tf": 1}, "sqlglot.expressions.RegexpLike": {"tf": 1}, "sqlglot.expressions.RegexpILike": {"tf": 1}, "sqlglot.expressions.RegexpSplit": {"tf": 1}, "sqlglot.expressions.Repeat": {"tf": 1}, "sqlglot.expressions.Round": {"tf": 1}, "sqlglot.expressions.RowNumber": {"tf": 1}, "sqlglot.expressions.SafeDivide": {"tf": 1}, 
"sqlglot.expressions.SHA": {"tf": 1}, "sqlglot.expressions.SHA2": {"tf": 1}, "sqlglot.expressions.SortArray": {"tf": 1}, "sqlglot.expressions.Split": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}, "sqlglot.expressions.StandardHash": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}, "sqlglot.expressions.StrToDate": {"tf": 1}, "sqlglot.expressions.StrToTime": {"tf": 1}, "sqlglot.expressions.StrToUnix": {"tf": 1}, "sqlglot.expressions.NumberToStr": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.expressions.StructExtract": {"tf": 1}, "sqlglot.expressions.Sqrt": {"tf": 1}, "sqlglot.expressions.TimeToStr": {"tf": 1}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1}, "sqlglot.expressions.TimeToUnix": {"tf": 1}, "sqlglot.expressions.TimeStrToDate": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1}, "sqlglot.expressions.Unhex": {"tf": 1}, "sqlglot.expressions.UnixToStr": {"tf": 1}, "sqlglot.expressions.UnixToTime": {"tf": 1}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1}, "sqlglot.expressions.Upper": {"tf": 1}, "sqlglot.expressions.Week": {"tf": 1}, "sqlglot.expressions.XMLTable": {"tf": 1}, "sqlglot.expressions.Year": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.expressions.NextValueFor": {"tf": 1}}, "df": 136}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONBExtract": {"tf": 1}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1}}, "df": 3}}}}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ApproxQuantile": {"tf": 1}}, "df": 1}}}}}}}}}}, "doc": {"root": {"0": {"0": {"0": {"9": {"9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"0": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"4": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"2": {"5": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "5": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"0": {"5": {"4": {"5": {"5": {"2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"2": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"8": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"0": {"8": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "9": {"9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 
1.7320508075688772}}, "df": 1}, "2": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "3": {"2": {"8": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"1": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"4": {"1": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"5": {"8": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"6": {"7": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "8": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "5": {"4": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 6.928203230275509}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 4.47213595499958}, "sqlglot.executor": {"tf": 2}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 2}, "sqlglot.trie.in_trie": {"tf": 2.23606797749979}}, "df": 41, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 
0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "1": {"0": {"0": {"0": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}}, "df": 4, "^": {"1": {"2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "1": {"4": {"5": {"docs": {}, "df": 0, "/": {"2": {"6": {"4": {"2": {"9": {"3": {"7": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "2": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "3": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "6": {"3": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "3": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "4": {"1": {"3": {"4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"1": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"1": {"8": {"0": {"8": {"8": {"0": {"2": {"8": {"2": {"9": {"5": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "8": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "9": {"8": {"6": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "9": {"6": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 6}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.diff": {"tf": 4}, "sqlglot.executor": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Condition.or_": {"tf": 2}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, "sqlglot.expressions.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.dict_depth": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 43, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2}}}, "2": {"0": {"0": {"7": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "1": {"4": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"1": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "4": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "5": {"1": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "5": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"4": {"2": {"9": {"8": {"2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 2}, "9": {"4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 3.3166247903554}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": 
{"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 4.358898943540674}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 46}, "3": {"1": {"3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"4": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "4": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}}, "df": 1}, "7": {"7": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"docs": {"sqlglot": {"tf": 6.855654600401044}, "sqlglot.dataframe": {"tf": 7.0710678118654755}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2.8284271247461903}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Star.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Alias.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Cast.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 4}, "sqlglot.expressions.subquery": {"tf": 2.8284271247461903}, "sqlglot.expressions.cast": {"tf": 2.449489742783178}, "sqlglot.expressions.values": {"tf": 2}, "sqlglot.expressions.var": {"tf": 3.1622776601683795}, "sqlglot.expressions.column_table_names": {"tf": 2}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 2}, "sqlglot.expressions.func": {"tf": 2}, "sqlglot.helper.split_num_words": {"tf": 3.4641016151377544}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2}, 
"sqlglot.optimizer.scope.traverse_scope": {"tf": 2.8284271247461903}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_set": {"tf": 4}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 3.7416573867739413}, "sqlglot.trie.in_trie": {"tf": 2.8284271247461903}}, "df": 90}, "docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 53}, "4": {"0": {"0": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"4": {"3": {"4": {"1": {"6": {"6": {"2": {"4": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, 
"sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"9": {"8": {"3": {"1": {"3": {"6": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"6": {"2": {"4": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"2": {"9": {"6": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}, "8": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "9": {"3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 27}, "5": {"0": {"4": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20}, "3": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.func": {"tf": 2}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}}, "df": 8}, "6": {"0": {"6": {"2": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.23606797749979}}, "df": 2, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "7": {"0": {"4": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"docs": {}, "df": 0, "\u2013": {"7": {"4": {"3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "6": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "9": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 2}, "8": {"0": {"5": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}, "9": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}, 
"9": {"0": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "7": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "8": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 62.12889826803627}, "sqlglot.pretty": {"tf": 1.7320508075688772}, "sqlglot.schema": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 5.916079783099616}, "sqlglot.parse_one": {"tf": 6.324555320336759}, "sqlglot.transpile": {"tf": 7.211102550927978}, "sqlglot.dataframe": {"tf": 48.86716689148246}, "sqlglot.dataframe.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 2.6457513110645907}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 2}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1.7320508075688772}, 
"sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.like": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.over": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1.7320508075688772}, 
"sqlglot.dataframe.sql.WindowSpec": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 21.817424229271428}, "sqlglot.dialects.bigquery": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.dialect": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.if_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.str_position_sql": 
{"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 5.744562646538029}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.oracle.Oracle": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.presto": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 3.3166247903554}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.snowflake": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 
1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.tableau": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator.coalesce_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.teradata": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 13.638181696985855}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 50.556898639058154}, "sqlglot.diff.Insert": {"tf": 1.4142135623730951}, "sqlglot.diff.Insert.__init__": 
{"tf": 1.7320508075688772}, "sqlglot.diff.Remove": {"tf": 1.4142135623730951}, "sqlglot.diff.Remove.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Move": {"tf": 1.4142135623730951}, "sqlglot.diff.Move.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Update": {"tf": 1.4142135623730951}, "sqlglot.diff.Update.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Keep": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 11.313708498984761}, "sqlglot.diff.ChangeDistiller": {"tf": 2.6457513110645907}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1.7320508075688772}, "sqlglot.errors": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.7320508075688772}, "sqlglot.errors.SqlglotError": {"tf": 1.7320508075688772}, "sqlglot.errors.UnsupportedError": {"tf": 1.7320508075688772}, "sqlglot.errors.ParseError": {"tf": 1.7320508075688772}, "sqlglot.errors.ParseError.__init__": {"tf": 1.7320508075688772}, "sqlglot.errors.ParseError.new": {"tf": 1.7320508075688772}, "sqlglot.errors.TokenError": {"tf": 1.7320508075688772}, "sqlglot.errors.OptimizeError": {"tf": 1.7320508075688772}, "sqlglot.errors.SchemaError": {"tf": 1.7320508075688772}, "sqlglot.errors.ExecuteError": {"tf": 1.7320508075688772}, "sqlglot.errors.concat_messages": {"tf": 1.7320508075688772}, "sqlglot.errors.merge_errors": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 35.11409973215888}, "sqlglot.executor.execute": {"tf": 7}, "sqlglot.executor.context": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context": {"tf": 3}, "sqlglot.executor.context.Context.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.eval": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.add_columns": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.filter": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.sort": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.set_row": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.set_index": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.set_range": {"tf": 1.7320508075688772}, "sqlglot.executor.env": {"tf": 1.7320508075688772}, "sqlglot.executor.env.reverse_key": {"tf": 1.7320508075688772}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.env.filter_nulls": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 4.58257569495584}, "sqlglot.executor.env.str_position": {"tf": 1.7320508075688772}, "sqlglot.executor.env.substring": {"tf": 1.7320508075688772}, "sqlglot.executor.env.cast": {"tf": 1.7320508075688772}, "sqlglot.executor.env.ordered": {"tf": 1.7320508075688772}, "sqlglot.executor.env.interval": {"tf": 1.7320508075688772}, "sqlglot.executor.python": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1.7320508075688772}, 
"sqlglot.executor.python.PythonExecutor.generate": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 13.638181696985855}, "sqlglot.executor.table": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.add_columns": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.append": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.pop": {"tf": 1.7320508075688772}, "sqlglot.executor.table.TableIter": {"tf": 1.7320508075688772}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RangeReader": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RowReader": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 4}, "sqlglot.executor.table.ensure_tables": {"tf": 1.7320508075688772}, "sqlglot.expressions": {"tf": 4}, "sqlglot.expressions.Expression": {"tf": 10.954451150103322}, "sqlglot.expressions.Expression.__init__": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.this": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.expressions": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.text": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.is_string": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.is_number": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.is_int": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.is_star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.alias": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Expression.copy": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.add_comments": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 4.358898943540674}, "sqlglot.expressions.Expression.set": {"tf": 4.898979485566356}, "sqlglot.expressions.Expression.depth": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Expression.find": {"tf": 5.291502622129181}, "sqlglot.expressions.Expression.find_all": {"tf": 5.291502622129181}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 4.795831523312719}, "sqlglot.expressions.Expression.parent_select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.same_parent": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.root": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.walk": {"tf": 5.5677643628300215}, "sqlglot.expressions.Expression.dfs": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.bfs": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.unnest": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.unalias": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.flatten": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.sql": {"tf": 5.477225575051661}, "sqlglot.expressions.Expression.transform": {"tf": 5.477225575051661}, "sqlglot.expressions.Expression.replace": {"tf": 5.477225575051661}, "sqlglot.expressions.Expression.pop": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.assert_is": {"tf": 9.643650760992955}, "sqlglot.expressions.Expression.error_messages": {"tf": 4.795831523312719}, "sqlglot.expressions.Expression.dump": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.load": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.and_": {"tf": 9.797958971132712}, "sqlglot.expressions.Condition.or_": {"tf": 9.797958971132712}, "sqlglot.expressions.Condition.not_": {"tf": 8.54400374531753}, "sqlglot.expressions.Condition.as_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.isin": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.between": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.is_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.like": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.ilike": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.eq": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.neq": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.rlike": {"tf": 1.7320508075688772}, "sqlglot.expressions.Predicate": {"tf": 1.7320508075688772}, "sqlglot.expressions.DerivedTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 10.488088481701515}, "sqlglot.expressions.Unionable.intersect": {"tf": 10.488088481701515}, "sqlglot.expressions.Unionable.except_": {"tf": 10.535653752852738}, "sqlglot.expressions.UDTF": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cache": {"tf": 1.7320508075688772}, "sqlglot.expressions.Uncache": {"tf": 1.7320508075688772}, "sqlglot.expressions.Create": {"tf": 1.7320508075688772}, "sqlglot.expressions.Clone": {"tf": 1.7320508075688772}, "sqlglot.expressions.Describe": {"tf": 1.7320508075688772}, "sqlglot.expressions.Pragma": {"tf": 1.7320508075688772}, "sqlglot.expressions.Set": {"tf": 1.7320508075688772}, "sqlglot.expressions.SetItem": {"tf": 1.7320508075688772}, "sqlglot.expressions.Show": {"tf": 1.7320508075688772}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1.7320508075688772}, "sqlglot.expressions.CharacterSet": {"tf": 1.7320508075688772}, "sqlglot.expressions.With": {"tf": 1.7320508075688772}, "sqlglot.expressions.WithinGroup": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.CTE": {"tf": 1.7320508075688772}, "sqlglot.expressions.TableAlias": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitString": {"tf": 1.7320508075688772}, "sqlglot.expressions.HexString": {"tf": 1.7320508075688772}, "sqlglot.expressions.ByteString": {"tf": 1.7320508075688772}, "sqlglot.expressions.RawString": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Column.parts": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.to_dot": {"tf": 1.7320508075688772}, "sqlglot.expressions.ColumnPosition": {"tf": 1.7320508075688772}, "sqlglot.expressions.ColumnDef": {"tf": 1.7320508075688772}, "sqlglot.expressions.AlterColumn": {"tf": 1.7320508075688772}, "sqlglot.expressions.RenameTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.SetTag": {"tf": 1.7320508075688772}, "sqlglot.expressions.Comment": {"tf": 1.7320508075688772}, "sqlglot.expressions.MergeTreeTTLAction": {"tf": 1.7320508075688772}, "sqlglot.expressions.MergeTreeTTL": {"tf": 1.7320508075688772}, "sqlglot.expressions.ColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1.7320508075688772}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CompressColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.TitleColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.PathColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Constraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.delete": {"tf": 9.055385138137417}, "sqlglot.expressions.Delete.where": {"tf": 10.198039027185569}, "sqlglot.expressions.Delete.returning": {"tf": 10.488088481701515}, "sqlglot.expressions.Drop": {"tf": 1.7320508075688772}, "sqlglot.expressions.Filter": {"tf": 1.7320508075688772}, "sqlglot.expressions.Check": {"tf": 1.7320508075688772}, "sqlglot.expressions.Directory": {"tf": 1.7320508075688772}, "sqlglot.expressions.ForeignKey": {"tf": 1.7320508075688772}, "sqlglot.expressions.PrimaryKey": {"tf": 1.7320508075688772}, "sqlglot.expressions.Into": {"tf": 1.7320508075688772}, "sqlglot.expressions.From": {"tf": 1.7320508075688772}, "sqlglot.expressions.Having": {"tf": 
1.7320508075688772}, "sqlglot.expressions.Hint": {"tf": 1.7320508075688772}, "sqlglot.expressions.JoinHint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Index": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 11.874342087037917}, "sqlglot.expressions.OnConflict": {"tf": 1.7320508075688772}, "sqlglot.expressions.Returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.Introducer": {"tf": 1.7320508075688772}, "sqlglot.expressions.National": {"tf": 1.7320508075688772}, "sqlglot.expressions.LoadData": {"tf": 1.7320508075688772}, "sqlglot.expressions.Partition": {"tf": 1.7320508075688772}, "sqlglot.expressions.Fetch": {"tf": 1.7320508075688772}, "sqlglot.expressions.Group": {"tf": 1.7320508075688772}, "sqlglot.expressions.Lambda": {"tf": 1.7320508075688772}, "sqlglot.expressions.Limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.number": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.string": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 11.135528725660043}, "sqlglot.expressions.Join.using": {"tf": 11.445523142259598}, "sqlglot.expressions.Lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.MatchRecognize": {"tf": 1.7320508075688772}, "sqlglot.expressions.Final": {"tf": 1.7320508075688772}, "sqlglot.expressions.Offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Order": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cluster": {"tf": 1.7320508075688772}, "sqlglot.expressions.Distribute": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sort": {"tf": 1.7320508075688772}, "sqlglot.expressions.Ordered": {"tf": 1.7320508075688772}, "sqlglot.expressions.Property": {"tf": 1.7320508075688772}, "sqlglot.expressions.AlgorithmProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ChecksumProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.CollateProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DefinerProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DistKeyProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DistStyleProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.EngineProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ExternalProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.FallbackProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.FileFormatProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.FreespaceProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.InputOutputFormat": {"tf": 1.7320508075688772}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.JournalProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LanguageProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DictProperty": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.DictSubProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DictRange": {"tf": 1.7320508075688772}, "sqlglot.expressions.LikeProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LocationProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LockingProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LogProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.MaterializedProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.OnCommitProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ReturnsProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowFormatProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SerdeProperties": {"tf": 1.7320508075688772}, "sqlglot.expressions.SetProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SettingsProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SortKeyProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.StabilityProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.TemporaryProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.TransientProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.VolatileProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.WithDataProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.from_dict": {"tf": 1.7320508075688772}, "sqlglot.expressions.Qualify": {"tf": 1.7320508075688772}, "sqlglot.expressions.Return": {"tf": 1.7320508075688772}, "sqlglot.expressions.Reference": {"tf": 1.7320508075688772}, "sqlglot.expressions.Tuple": {"tf": 1.7320508075688772}, "sqlglot.expressions.Tuple.isin": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 11.832159566199232}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 12.449899597988733}, "sqlglot.expressions.Table": {"tf": 1.7320508075688772}, "sqlglot.expressions.Table.parts": {"tf": 1.7320508075688772}, "sqlglot.expressions.SystemTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Union.limit": {"tf": 10.816653826391969}, "sqlglot.expressions.Union.select": {"tf": 10.723805294763608}, "sqlglot.expressions.Union.is_star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Except": {"tf": 1.7320508075688772}, "sqlglot.expressions.Intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unnest": {"tf": 1.7320508075688772}, "sqlglot.expressions.Update": {"tf": 1.7320508075688772}, "sqlglot.expressions.Values": {"tf": 1.7320508075688772}, "sqlglot.expressions.Var": {"tf": 1.7320508075688772}, "sqlglot.expressions.Schema": {"tf": 1.7320508075688772}, "sqlglot.expressions.Lock": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 10.488088481701515}, "sqlglot.expressions.Select.group_by": {"tf": 11.704699910719626}, "sqlglot.expressions.Select.order_by": {"tf": 11.445523142259598}, "sqlglot.expressions.Select.sort_by": {"tf": 11.874342087037917}, "sqlglot.expressions.Select.cluster_by": {"tf": 11.874342087037917}, "sqlglot.expressions.Select.limit": {"tf": 10.954451150103322}, "sqlglot.expressions.Select.offset": {"tf": 10.954451150103322}, "sqlglot.expressions.Select.select": {"tf": 10.246950765959598}, "sqlglot.expressions.Select.lateral": {"tf": 11.180339887498949}, "sqlglot.expressions.Select.join": {"tf": 18.894443627691185}, "sqlglot.expressions.Select.where": {"tf": 11.180339887498949}, "sqlglot.expressions.Select.having": {"tf": 12.041594578792296}, "sqlglot.expressions.Select.window": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.qualify": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.distinct": {"tf": 10.14889156509222}, "sqlglot.expressions.Select.ctas": {"tf": 11.135528725660043}, "sqlglot.expressions.Select.lock": {"tf": 14.696938456699069}, "sqlglot.expressions.Select.is_star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.unnest": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.is_star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.TableSample": {"tf": 1.7320508075688772}, "sqlglot.expressions.Tag": {"tf": 2.23606797749979}, "sqlglot.expressions.Pivot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Window": {"tf": 1.7320508075688772}, "sqlglot.expressions.WindowSpec": {"tf": 1.7320508075688772}, "sqlglot.expressions.Where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Parameter": {"tf": 1.7320508075688772}, "sqlglot.expressions.SessionParameter": {"tf": 1.7320508075688772}, "sqlglot.expressions.Placeholder": {"tf": 1.7320508075688772}, "sqlglot.expressions.Null": {"tf": 1.7320508075688772}, "sqlglot.expressions.Boolean": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataTypeSize": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 
1.7320508075688772}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 
1.7320508075688772}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.is_type": {"tf": 1.7320508075688772}, "sqlglot.expressions.PseudoType": {"tf": 1.7320508075688772}, "sqlglot.expressions.SubqueryPredicate": {"tf": 1.7320508075688772}, "sqlglot.expressions.All": {"tf": 1.7320508075688772}, "sqlglot.expressions.Any": {"tf": 1.7320508075688772}, "sqlglot.expressions.Exists": {"tf": 1.7320508075688772}, "sqlglot.expressions.Command": {"tf": 1.7320508075688772}, "sqlglot.expressions.Transaction": {"tf": 1.7320508075688772}, "sqlglot.expressions.Commit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Rollback": {"tf": 1.7320508075688772}, "sqlglot.expressions.AlterTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.AddConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.DropPartition": {"tf": 1.7320508075688772}, "sqlglot.expressions.Binary": {"tf": 1.7320508075688772}, "sqlglot.expressions.Add": {"tf": 1.7320508075688772}, "sqlglot.expressions.Connector": {"tf": 1.7320508075688772}, "sqlglot.expressions.And": {"tf": 1.7320508075688772}, "sqlglot.expressions.Or": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseAnd": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseOr": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseXor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Div": {"tf": 1.7320508075688772}, "sqlglot.expressions.Overlaps": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.DPipe": {"tf": 1.7320508075688772}, "sqlglot.expressions.EQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.NullSafeEQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.Distance": {"tf": 
1.7320508075688772}, "sqlglot.expressions.Escape": {"tf": 1.7320508075688772}, "sqlglot.expressions.Glob": {"tf": 1.7320508075688772}, "sqlglot.expressions.GT": {"tf": 1.7320508075688772}, "sqlglot.expressions.GTE": {"tf": 1.7320508075688772}, "sqlglot.expressions.ILike": {"tf": 1.7320508075688772}, "sqlglot.expressions.ILikeAny": {"tf": 1.7320508075688772}, "sqlglot.expressions.IntDiv": {"tf": 1.7320508075688772}, "sqlglot.expressions.Is": {"tf": 1.7320508075688772}, "sqlglot.expressions.Kwarg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Like": {"tf": 1.7320508075688772}, "sqlglot.expressions.LikeAny": {"tf": 1.7320508075688772}, "sqlglot.expressions.LT": {"tf": 1.7320508075688772}, "sqlglot.expressions.LTE": {"tf": 1.7320508075688772}, "sqlglot.expressions.Mod": {"tf": 1.7320508075688772}, "sqlglot.expressions.Mul": {"tf": 1.7320508075688772}, "sqlglot.expressions.NEQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.SimilarTo": {"tf": 1.7320508075688772}, "sqlglot.expressions.Slice": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sub": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayOverlaps": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unary": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseNot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Not": {"tf": 1.7320508075688772}, "sqlglot.expressions.Paren": {"tf": 1.7320508075688772}, "sqlglot.expressions.Neg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Aliases": {"tf": 1.7320508075688772}, "sqlglot.expressions.AtTimeZone": {"tf": 1.7320508075688772}, "sqlglot.expressions.Between": {"tf": 1.7320508075688772}, "sqlglot.expressions.Bracket": {"tf": 1.7320508075688772}, "sqlglot.expressions.Distinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.In": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeUnit": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1.7320508075688772}, "sqlglot.expressions.Interval": {"tf": 1.7320508075688772}, "sqlglot.expressions.IgnoreNulls": {"tf": 1.7320508075688772}, "sqlglot.expressions.RespectNulls": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 4.69041575982343}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func.sql_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func.sql_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1.7320508075688772}, "sqlglot.expressions.AggFunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.ParameterizedAgg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Abs": {"tf": 1.7320508075688772}, "sqlglot.expressions.Anonymous": {"tf": 1.7320508075688772}, "sqlglot.expressions.Hll": {"tf": 1.7320508075688772}, "sqlglot.expressions.ApproxDistinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.Array": {"tf": 1.7320508075688772}, "sqlglot.expressions.ToChar": {"tf": 1.7320508075688772}, "sqlglot.expressions.GenerateSeries": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayAgg": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayAll": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayAny": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayConcat": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayContains": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayContained": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayFilter": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.ArrayJoin": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArraySize": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArraySort": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArraySum": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Avg": {"tf": 1.7320508075688772}, "sqlglot.expressions.AnyValue": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case.when": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case.else_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Cast.is_type": {"tf": 1.7320508075688772}, "sqlglot.expressions.CastToStrType": {"tf": 1.7320508075688772}, "sqlglot.expressions.Collate": {"tf": 1.7320508075688772}, "sqlglot.expressions.TryCast": {"tf": 1.7320508075688772}, "sqlglot.expressions.Ceil": {"tf": 1.7320508075688772}, "sqlglot.expressions.Coalesce": {"tf": 1.7320508075688772}, "sqlglot.expressions.Concat": {"tf": 1.7320508075688772}, "sqlglot.expressions.ConcatWs": {"tf": 1.7320508075688772}, "sqlglot.expressions.Count": {"tf": 1.7320508075688772}, "sqlglot.expressions.CountIf": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentDatetime": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentUser": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.DayOfWeek": {"tf": 1.7320508075688772}, "sqlglot.expressions.DayOfMonth": {"tf": 1.7320508075688772}, "sqlglot.expressions.DayOfYear": {"tf": 1.7320508075688772}, "sqlglot.expressions.WeekOfYear": {"tf": 1.7320508075688772}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1.7320508075688772}, "sqlglot.expressions.Extract": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateFromParts": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateStrToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateToDateStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateToDi": {"tf": 1.7320508075688772}, "sqlglot.expressions.Day": {"tf": 1.7320508075688772}, "sqlglot.expressions.Decode": {"tf": 1.7320508075688772}, "sqlglot.expressions.DiToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.Encode": {"tf": 1.7320508075688772}, "sqlglot.expressions.Exp": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Explode": {"tf": 1.7320508075688772}, "sqlglot.expressions.Floor": {"tf": 1.7320508075688772}, "sqlglot.expressions.FromBase64": {"tf": 1.7320508075688772}, "sqlglot.expressions.ToBase64": {"tf": 1.7320508075688772}, "sqlglot.expressions.Greatest": {"tf": 1.7320508075688772}, "sqlglot.expressions.GroupConcat": {"tf": 1.7320508075688772}, "sqlglot.expressions.Hex": {"tf": 1.7320508075688772}, "sqlglot.expressions.If": {"tf": 1.7320508075688772}, "sqlglot.expressions.IfNull": {"tf": 1.7320508075688772}, "sqlglot.expressions.Initcap": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONKeyValue": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONObject": {"tf": 1.7320508075688772}, "sqlglot.expressions.OpenJSONColumnDef": {"tf": 1.7320508075688772}, "sqlglot.expressions.OpenJSON": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONBContains": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONExtractScalar": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONBExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONFormat": {"tf": 1.7320508075688772}, "sqlglot.expressions.Least": {"tf": 1.7320508075688772}, "sqlglot.expressions.Left": {"tf": 1.7320508075688772}, "sqlglot.expressions.Right": {"tf": 1.7320508075688772}, "sqlglot.expressions.Length": {"tf": 1.7320508075688772}, "sqlglot.expressions.Levenshtein": {"tf": 1.7320508075688772}, "sqlglot.expressions.Ln": {"tf": 1.7320508075688772}, "sqlglot.expressions.Log": {"tf": 1.7320508075688772}, "sqlglot.expressions.Log2": {"tf": 1.7320508075688772}, "sqlglot.expressions.Log10": {"tf": 1.7320508075688772}, "sqlglot.expressions.LogicalOr": {"tf": 1.7320508075688772}, "sqlglot.expressions.LogicalAnd": {"tf": 1.7320508075688772}, "sqlglot.expressions.Lower": {"tf": 1.7320508075688772}, "sqlglot.expressions.Map": {"tf": 1.7320508075688772}, "sqlglot.expressions.StarMap": {"tf": 1.7320508075688772}, "sqlglot.expressions.VarMap": {"tf": 1.7320508075688772}, "sqlglot.expressions.MatchAgainst": {"tf": 1.7320508075688772}, "sqlglot.expressions.Max": {"tf": 1.7320508075688772}, "sqlglot.expressions.MD5": {"tf": 1.7320508075688772}, "sqlglot.expressions.Min": {"tf": 1.7320508075688772}, "sqlglot.expressions.Month": {"tf": 1.7320508075688772}, "sqlglot.expressions.Nvl2": {"tf": 1.7320508075688772}, "sqlglot.expressions.Posexplode": {"tf": 1.7320508075688772}, "sqlglot.expressions.Pow": {"tf": 1.7320508075688772}, "sqlglot.expressions.PercentileCont": {"tf": 1.7320508075688772}, "sqlglot.expressions.PercentileDisc": {"tf": 1.7320508075688772}, "sqlglot.expressions.Quantile": {"tf": 1.7320508075688772}, "sqlglot.expressions.ApproxQuantile": {"tf": 1.7320508075688772}, "sqlglot.expressions.RangeN": {"tf": 1.7320508075688772}, "sqlglot.expressions.ReadCSV": {"tf": 1.7320508075688772}, "sqlglot.expressions.Reduce": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpLike": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpILike": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpSplit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Repeat": {"tf": 1.7320508075688772}, "sqlglot.expressions.Round": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowNumber": {"tf": 1.7320508075688772}, "sqlglot.expressions.SafeDivide": {"tf": 1.7320508075688772}, "sqlglot.expressions.SetAgg": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.SHA": {"tf": 1.7320508075688772}, "sqlglot.expressions.SHA2": {"tf": 1.7320508075688772}, "sqlglot.expressions.SortArray": {"tf": 1.7320508075688772}, "sqlglot.expressions.Split": {"tf": 1.7320508075688772}, "sqlglot.expressions.Substring": {"tf": 1.7320508075688772}, "sqlglot.expressions.StandardHash": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrPosition": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrToTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrToUnix": {"tf": 1.7320508075688772}, "sqlglot.expressions.NumberToStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.Struct": {"tf": 1.7320508075688772}, "sqlglot.expressions.StructExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sum": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sqrt": {"tf": 1.7320508075688772}, "sqlglot.expressions.Stddev": {"tf": 1.7320508075688772}, "sqlglot.expressions.StddevPop": {"tf": 1.7320508075688772}, "sqlglot.expressions.StddevSamp": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeToStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeToUnix": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeStrToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeStrToTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1.7320508075688772}, "sqlglot.expressions.Trim": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unhex": {"tf": 1.7320508075688772}, "sqlglot.expressions.UnixToStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.UnixToTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.Upper": {"tf": 1.7320508075688772}, "sqlglot.expressions.Variance": {"tf": 1.7320508075688772}, "sqlglot.expressions.VariancePop": {"tf": 1.7320508075688772}, "sqlglot.expressions.Week": {"tf": 1.7320508075688772}, "sqlglot.expressions.XMLTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Year": {"tf": 1.7320508075688772}, "sqlglot.expressions.Use": {"tf": 1.7320508075688772}, "sqlglot.expressions.Merge": {"tf": 1.7320508075688772}, "sqlglot.expressions.When": {"tf": 1.7320508075688772}, "sqlglot.expressions.NextValueFor": {"tf": 1.7320508075688772}, "sqlglot.expressions.maybe_parse": {"tf": 10.14889156509222}, "sqlglot.expressions.union": {"tf": 10}, "sqlglot.expressions.intersect": {"tf": 10}, "sqlglot.expressions.except_": {"tf": 10.04987562112089}, "sqlglot.expressions.select": {"tf": 9.797958971132712}, "sqlglot.expressions.from_": {"tf": 9.797958971132712}, "sqlglot.expressions.update": {"tf": 12}, "sqlglot.expressions.delete": {"tf": 9.433981132056603}, "sqlglot.expressions.insert": {"tf": 10.04987562112089}, "sqlglot.expressions.condition": {"tf": 14.071247279470288}, "sqlglot.expressions.and_": {"tf": 10.04987562112089}, "sqlglot.expressions.or_": {"tf": 10.04987562112089}, "sqlglot.expressions.not_": {"tf": 9}, "sqlglot.expressions.paren": {"tf": 8.48528137423857}, "sqlglot.expressions.to_identifier": {"tf": 5.744562646538029}, "sqlglot.expressions.to_interval": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 6.244997998398398}, 
"sqlglot.expressions.to_column": {"tf": 5.291502622129181}, "sqlglot.expressions.alias_": {"tf": 12.84523257866513}, "sqlglot.expressions.subquery": {"tf": 10}, "sqlglot.expressions.column": {"tf": 6.557438524302}, "sqlglot.expressions.cast": {"tf": 8.888194417315589}, "sqlglot.expressions.table_": {"tf": 6.557438524302}, "sqlglot.expressions.values": {"tf": 8.888194417315589}, "sqlglot.expressions.var": {"tf": 10.488088481701515}, "sqlglot.expressions.rename_table": {"tf": 4.898979485566356}, "sqlglot.expressions.convert": {"tf": 5.744562646538029}, "sqlglot.expressions.replace_children": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 8.54400374531753}, "sqlglot.expressions.table_name": {"tf": 9.695359714832659}, "sqlglot.expressions.replace_tables": {"tf": 10.344080432788601}, "sqlglot.expressions.replace_placeholders": {"tf": 12.489995996796797}, "sqlglot.expressions.expand": {"tf": 14.247806848775006}, "sqlglot.expressions.func": {"tf": 12.884098726725126}, "sqlglot.expressions.true": {"tf": 1.7320508075688772}, "sqlglot.expressions.false": {"tf": 1.7320508075688772}, "sqlglot.expressions.null": {"tf": 1.7320508075688772}, "sqlglot.generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 13.638181696985855}, "sqlglot.generator.Generator.__init__": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 3}, "sqlglot.generator.Generator.unsupported": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sep": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.seg": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pad_comment": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.wrap": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.no_identify": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.normalize_func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.indent": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cache_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.column_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.create_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.clone_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.describe_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.with_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cte_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.tablealias_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.directory_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.delete_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.drop_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.except_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.except_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.filter_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.hint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.index_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.national_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.partition_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.properties_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.root_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.with_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.property_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.insert_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.intersect_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.returning_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.table_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.pivot_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.update_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.var_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.into_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.from_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.group_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.having_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.join_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.limit_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.offset_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.set_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lock_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.literal_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.null_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.order_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sort_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.select_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.schema_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.star_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.union_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.union_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.where_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.window_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.between_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.all_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.any_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.exists_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.case_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.extract_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.trim_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.concat_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.check_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.if_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.in_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.interval_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.return_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.reference_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.paren_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.neg_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.not_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.alias_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.add_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.and_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.connector_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cast_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.collate_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.command_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.comment_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.commit_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.renametable_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.div_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.distance_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dot_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.eq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.escape_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.glob_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.gt_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.gte_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.is_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.like_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lt_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lte_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mod_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mul_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.neq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.or_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.slice_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sub_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.use_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.binary": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.format_args": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.text_width": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.format_time": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.expressions": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.naked_property": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.set_operation": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tag_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.token_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.kwarg_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.when_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.merge_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.cached_generator": {"tf": 1.7320508075688772}, "sqlglot.helper": {"tf": 1.7320508075688772}, "sqlglot.helper.AutoName": {"tf": 2.449489742783178}, "sqlglot.helper.seq_get": {"tf": 3.3166247903554}, "sqlglot.helper.ensure_list": {"tf": 4.795831523312719}, "sqlglot.helper.ensure_collection": {"tf": 5.196152422706632}, "sqlglot.helper.csv": {"tf": 5.291502622129181}, "sqlglot.helper.subclasses": {"tf": 5.744562646538029}, "sqlglot.helper.apply_index_offset": {"tf": 5.830951894845301}, "sqlglot.helper.camel_to_snake_case": {"tf": 2.23606797749979}, "sqlglot.helper.while_changing": {"tf": 5.291502622129181}, "sqlglot.helper.tsort": {"tf": 4.795831523312719}, "sqlglot.helper.open_file": {"tf": 1.7320508075688772}, "sqlglot.helper.csv_reader": {"tf": 5.477225575051661}, "sqlglot.helper.find_new_name": {"tf": 5.291502622129181}, "sqlglot.helper.name_sequence": {"tf": 1.7320508075688772}, "sqlglot.helper.object_to_dict": {"tf": 1.7320508075688772}, "sqlglot.helper.split_num_words": {"tf": 12.649110640673518}, "sqlglot.helper.is_iterable": {"tf": 8.94427190999916}, "sqlglot.helper.flatten": {"tf": 11.224972160321824}, "sqlglot.helper.dict_depth": {"tf": 11.489125293076057}, "sqlglot.helper.first": {"tf": 2.449489742783178}, "sqlglot.helper.case_sensitive": {"tf": 1.7320508075688772}, "sqlglot.helper.should_identify": {"tf": 6.324555320336759}, "sqlglot.lineage": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.__init__": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.walk": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.to_html": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 6.928203230275509}, "sqlglot.lineage.LineageHTML": {"tf": 2.6457513110645907}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 13.2664991614216}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 4.123105625617661}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 10.198039027185569}, "sqlglot.optimizer.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 10.344080432788601}, 
"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 4.58257569495584}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 12.727922061357855}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 11.180339887498949}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalize": {"tf": 10.535653752852738}, "sqlglot.optimizer.normalize.normalized": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 9.591663046625438}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.23606797749979}, "sqlglot.optimizer.normalize_identifiers": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 9.9498743710662}, "sqlglot.optimizer.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 7.874007874011811}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 7.745966692414834}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 10.14889156509222}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 10.44030650891055}, "sqlglot.optimizer.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 13.674794331177344}, "sqlglot.optimizer.qualify_columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 11.832159566199232}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 2}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 4.795831523312719}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 2}, "sqlglot.optimizer.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 13.341664064126334}, "sqlglot.optimizer.scope": {"tf": 
1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 9.539392014169456}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find": {"tf": 5.744562646538029}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 5.656854249492381}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 5.477225575051661}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 4.47213595499958}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 4.47213595499958}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 3.4641016151377544}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 3.872983346207417}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 4}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 3.4641016151377544}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 4.898979485566356}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 13.601470508735444}, "sqlglot.optimizer.scope.build_scope": {"tf": 4.69041575982343}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 5.385164807134504}, "sqlglot.optimizer.simplify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify": {"tf": 9.643650760992955}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 2.6457513110645907}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 
2.449489742783178}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2.8284271247461903}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.always_true": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.is_false": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.is_null": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1.7320508075688772}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 9.899494936611665}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1.7320508075688772}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1.7320508075688772}, "sqlglot.parser": {"tf": 1.7320508075688772}, "sqlglot.parser.parse_var_map": {"tf": 1.7320508075688772}, "sqlglot.parser.parse_like": {"tf": 1.7320508075688772}, "sqlglot.parser.binary_range_parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 6.082762530298219}, "sqlglot.parser.Parser.__init__": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.reset": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse": {"tf": 5.291502622129181}, "sqlglot.parser.Parser.parse_into": {"tf": 5.744562646538029}, "sqlglot.parser.Parser.check_errors": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.raise_error": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 5.744562646538029}, "sqlglot.parser.Parser.validate_expression": {"tf": 4.358898943540674}, "sqlglot.planner": {"tf": 1.7320508075688772}, "sqlglot.planner.Plan": {"tf": 1.7320508075688772}, "sqlglot.planner.Plan.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.Step": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.from_expression": {"tf": 7.681145747868608}, "sqlglot.planner.Step.add_dependency": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.to_s": {"tf": 1.7320508075688772}, "sqlglot.planner.Scan": {"tf": 1.7320508075688772}, "sqlglot.planner.Scan.from_expression": {"tf": 7.681145747868608}, "sqlglot.planner.Join": {"tf": 1.7320508075688772}, "sqlglot.planner.Join.from_joins": {"tf": 1.7320508075688772}, "sqlglot.planner.Aggregate": {"tf": 1.7320508075688772}, "sqlglot.planner.Sort": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.from_expression": {"tf": 7.681145747868608}, "sqlglot.schema.Schema": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 5.291502622129181}, "sqlglot.schema.Schema.column_names": {"tf": 6.082762530298219}, "sqlglot.schema.Schema.get_column_type": {"tf": 6.082762530298219}, "sqlglot.schema.Schema.supported_table_args": {"tf": 2.449489742783178}, "sqlglot.schema.Schema.empty": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema": {"tf": 4}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1.7320508075688772}, 
"sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 7.3484692283495345}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.copy": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 5.291502622129181}, "sqlglot.schema.MappingSchema.column_names": {"tf": 6.082762530298219}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 6.082762530298219}, "sqlglot.schema.ensure_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.ensure_column_mapping": {"tf": 1.7320508075688772}, "sqlglot.schema.flatten_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_get": {"tf": 5.830951894845301}, "sqlglot.schema.nested_set": {"tf": 12.649110640673518}, "sqlglot.serde": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1.7320508075688772}, "sqlglot.serde.load": {"tf": 2.23606797749979}, "sqlglot.time": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 7.681145747868608}, "sqlglot.tokens": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DASH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.STAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EQ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AND": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CARET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HASH": {"tf": 1.7320508075688772}, 
"sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT128": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT256": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1.7320508075688772}, 
"sqlglot.tokens.TokenType.VARBINARY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.XML": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ANY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ASC": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CACHE": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.CASE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DESC": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DIV": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DROP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.END": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FOR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FROM": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IF": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INNER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.7320508075688772}, 
"sqlglot.tokens.TokenType.INTO": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MOD": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OVER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROW": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SOME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TOP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.THEN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.USE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.USING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.__init__": {"tf": 6.244997998398398}, "sqlglot.tokens.Token.number": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.string": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.identifier": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.var": {"tf": 2.23606797749979}, "sqlglot.tokens.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 2.23606797749979}, "sqlglot.transforms": {"tf": 1.7320508075688772}, "sqlglot.transforms.unalias_group": {"tf": 9.327379053088816}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 5.0990195135927845}, "sqlglot.transforms.eliminate_qualify": {"tf": 3.1622776601683795}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.unnest_to_explode": {"tf": 2}, "sqlglot.transforms.explode_to_unnest": {"tf": 2}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1.7320508075688772}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1.7320508075688772}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 5}, "sqlglot.trie": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 9.539392014169456}, "sqlglot.trie.in_trie": {"tf": 13.228756555322953}}, "df": 1843, "s": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 67, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 5.5677643628300215}, 
"sqlglot.pretty": {"tf": 1}, "sqlglot.parse": {"tf": 2.23606797749979}, "sqlglot.parse_one": {"tf": 2.23606797749979}, "sqlglot.transpile": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 6.48074069840786}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.executor": {"tf": 6.324555320336759}, "sqlglot.executor.execute": {"tf": 2}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 2}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2.23606797749979}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 
1.4142135623730951}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 133, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 6.4031242374328485}, "sqlglot.schema": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 5}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 3}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 4.242640687119285}, "sqlglot.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2.23606797749979}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.449489742783178}, "sqlglot.optimizer.normalize.normalize": {"tf": 2}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.23606797749979}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify": {"tf": 2.23606797749979}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.23606797749979}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}}, "df": 76, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3.4641016151377544}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}}, "df": 11, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 
3.3166247903554}}, "df": 1}}}}}}, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 3}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "t": {"docs": {"sqlglot.helper.split_num_words": {"tf": 2.6457513110645907}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 5}, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3.7416573867739413}, "sqlglot.executor.python.Python.Generator": {"tf": 3.7416573867739413}, "sqlglot.generator.Generator": {"tf": 3.7416573867739413}}, "df": 22}}, "c": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 3}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}}, "df": 3}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 
4}}}}}}}, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Func": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 2}}}}, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, 
"sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}}, "df": 61}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.not_": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}}, "df": 7}}}}}}}, "b": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.expressions": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 2}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "t": {"docs": {"sqlglot.expressions.Func": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 13}}}, "y": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 12, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.expressions.expand": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot": {"tf": 2.6457513110645907}}, "df": 1}}}, "m": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 5, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 5}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 3}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.text": 
{"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}}, "df": 6}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 10, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}, "w": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 15, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 
2.23606797749979}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 12, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.diff": {"tf": 7.937253933193772}, "sqlglot.diff.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 2}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 24, "s": {"docs": {"sqlglot.expressions.expand": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}}, "df": 8}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, 
"sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}}, "df": 4, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2.8284271247461903}, "sqlglot.generator.Generator": {"tf": 2.8284271247461903}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 27, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Generator": 
{"tf": 2.449489742783178}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}}, "df": 22}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}}, "df": 18, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.6457513110645907}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 30}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": 
{"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}, "b": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 4.123105625617661}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 4.123105625617661}, "sqlglot.executor.python.Python.Generator": {"tf": 4.123105625617661}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 4.123105625617661}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 31, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, 
"df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 3.7416573867739413}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dataframe": {"tf": 2.6457513110645907}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, 
"sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.generator.Generator.generate": {"tf": 1.7320508075688772}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1.7320508075688772}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.string": {"tf": 1.4142135623730951}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 106, "s": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, 
"sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 25}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.6457513110645907}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 8, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "p": 
{"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.7320508075688772}}, "df": 3}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.simplify.simplify": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.executor.execute": {"tf": 1}}, "df": 3}, "y": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 4.242640687119285}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 10}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}}, "df": 4}}, "t": {"docs": {}, 
"df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 6.164414002968976}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.assert_is": {"tf": 2}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.select": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2.23606797749979}, 
"sqlglot.expressions.Select.lateral": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 3.1622776601683795}, "sqlglot.expressions.Select.where": {"tf": 2}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Select.distinct": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lock": {"tf": 2.449489742783178}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 2.449489742783178}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 3.1622776601683795}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.6457513110645907}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 99, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 4}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.output_name": {"tf": 1}, 
"sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 9, "s": {"docs": {"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 3}}, "|": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 6}}, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 5, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 2, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}}}}, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, 
"sqlglot.diff": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}}}}}}}}, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 5.916079783099616}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, 
"sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 2}}, "df": 89, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}}, "df": 4}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 2}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression.assert_is": 
{"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 22}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 9, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "h": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.dataframe": {"tf": 4.242640687119285}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.6457513110645907}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 21, "s": {"docs": {"sqlglot.schema.Schema": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 3.605551275463989}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 4.242640687119285}}, "df": 1, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": 
{"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.build_scope": {"tf": 2}}, "df": 35, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 3.605551275463989}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 4}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "r": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 4}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 44, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": 
{"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 8}}, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 23}}, "w": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.replace": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 4.795831523312719}, "sqlglot.executor": {"tf": 4.69041575982343}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot": {"tf": 3.605551275463989}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 3.4641016151377544}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Parser": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 7.54983443527075}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 4.58257569495584}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 2}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 2.23606797749979}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.paren": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1.4142135623730951}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 2.23606797749979}, "sqlglot.helper.should_identify": {"tf": 1}, 
"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 2.23606797749979}}, "df": 165, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 3}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.7416573867739413}, "sqlglot.executor": {"tf": 4.898979485566356}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_collection": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 
1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 96, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 3}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.is_iterable": {"tf": 2.23606797749979}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.first": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.helper.flatten": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 12, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3}}}}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "n": {"docs": {"sqlglot": {"tf": 4.123105625617661}, "sqlglot.schema": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3.605551275463989}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.8284271247461903}, 
"sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.diff": {"tf": 8.12403840463596}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2.8284271247461903}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, 
"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 2.8284271247461903}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": 
{"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 2.6457513110645907}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 2.449489742783178}}, "df": 168, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": 
{"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 45, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 
1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 45, "s": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 2}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 59, "s": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 6}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 6, "d": {"docs": {"sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 11}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 2.8284271247461903}}, "df": 5, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 3}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 2}}}}}}, "t": {"6": {"4": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 2.23606797749979}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 33, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 
1}}, "df": 1}}}}}}}}}, "o": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 44}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, 
"sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 5, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.6457513110645907}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "\u2019": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}}, "df": 3}}}}}, "e": {"docs": {}, "df": 
0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 2}}}}}}}, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 6}}}}}, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}}, "df": 4, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.helper.seq_get": {"tf": 
1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 23}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, 
"sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 48}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.is_iterable": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.condition": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}}, "df": 5}, "r": {"docs": {"sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, 
"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 6, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 4.358898943540674}, "sqlglot.dataframe": {"tf": 4.47213595499958}, "sqlglot.dialects": {"tf": 2}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 42, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 
1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 3}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 1}}}}}}}}}}, "d": {"1": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}}, "df": 1}, "2": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {"sqlglot": {"tf": 4.69041575982343}, "sqlglot.dataframe": {"tf": 3}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dialects": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 2}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1.4142135623730951}}, "df": 32, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 2}, 
"sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}}, "df": 31}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}}, "df": 2}}}, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 25, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.to_identifier": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2}}, "s": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}}, "f": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 
3}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3}, "sqlglot.diff": {"tf": 4.69041575982343}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 3}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.using": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 3}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1.7320508075688772}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, 
"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 159}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "\u2019": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "m": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}}, "a": {"0": {"docs": {"sqlglot.helper.name_sequence": {"tf": 1}}, "df": 1}, "1": {"docs": {"sqlglot.helper.name_sequence": {"tf": 1}}, "df": 1}, "2": {"docs": {"sqlglot.helper.name_sequence": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 7.681145747868608}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, 
"sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 2.449489742783178}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3}, "sqlglot.diff": {"tf": 8.94427190999916}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.diff": {"tf": 2.449489742783178}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.executor": {"tf": 8.660254037844387}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 3}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Expression": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.is_number": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 2}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 
1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 2.23606797749979}, "sqlglot.expressions.to_column": {"tf": 2}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 2}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 3}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 2.449489742783178}, "sqlglot.helper.ensure_collection": {"tf": 2.23606797749979}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1.7320508075688772}, "sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}, "sqlglot.helper.name_sequence": {"tf": 1.7320508075688772}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 2}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.8284271247461903}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2.8284271247461903}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 3.1622776601683795}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.8284271247461903}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 2}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 3}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 2}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 4.242640687119285}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 3.4641016151377544}, "sqlglot.parser.Parser": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 3.4641016151377544}, "sqlglot.planner.Scan.from_expression": {"tf": 3.4641016151377544}, "sqlglot.planner.SetOperation.from_expression": {"tf": 3.4641016151377544}, "sqlglot.schema.Schema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 2.6457513110645907}, "sqlglot.trie.in_trie": {"tf": 2.449489742783178}}, "df": 239, "n": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, 
"sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 3}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.Expression": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, 
"sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 133, "d": {"docs": {"sqlglot": {"tf": 5.196152422706632}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 7.0710678118654755}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.executor": {"tf": 7.0710678118654755}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.6457513110645907}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2.8284271247461903}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 120}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, 
"sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 13}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.func": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 28, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}}, "df": 1}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "l": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, 
"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 16}}, "l": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, 
"sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 2}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 70, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 2.8284271247461903}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 59, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, 
"sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 27}, "d": {"docs": {"sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.helper.find_new_name": {"tf": 1}}, "df": 3, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}}, "df": 25}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "g": {"docs": {"sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 5}, "sqlglot.diff.diff": {"tf": 1}, 
"sqlglot.diff.ChangeDistiller": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 4}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 
2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 4.47213595499958}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 80, "n": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}}, "df": 1}}, "g": {"docs": {"sqlglot.expressions.Expression": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.append": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.set": {"tf": 2}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 12, "s": {"docs": {"sqlglot.parse": {"tf": 1}, 
"sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 
1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 2}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 
1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 189}}}}}}, "s": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 12}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20}, "[": {"0": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20}, "1": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20}, "docs": {}, "df": 0}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": 
{"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 4.47213595499958}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 3}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 4}, "sqlglot.executor": {"tf": 4.242640687119285}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, 
"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 2.6457513110645907}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.6457513110645907}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 2}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 2}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 2.23606797749979}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.23606797749979}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.8284271247461903}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 2}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.6457513110645907}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 2}, "sqlglot.planner.Scan.from_expression": {"tf": 2}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 147, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 20, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "u": 
{"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}}, "df": 15, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 5}}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 3}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}}, "df": 
1}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 3}}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 3}}}}, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot": {"tf": 2}}, "df": 1, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 
1.7320508075688772}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7}, "s": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}}, "df": 10}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}}, "df": 17, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}}, "df": 3}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, 
"sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 4}, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 4}}}}}}}, "e": {"docs": {"sqlglot.dataframe": {"tf": 3.872983346207417}}, "df": 1}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}}, "df": 4, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 3}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.func": {"tf": 1.4142135623730951}}, "df": 1, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 5}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}, "x": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}}, "df": 2, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 3}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "o": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 3}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, 
"sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 44}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 14, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 4}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}}, "df": 3}}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, 
"sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}}, "df": 15, "t": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.executor": {"tf": 3}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 
1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 83, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 7, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 7.745966692414834}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 28, "s": {"docs": 
{"sqlglot.diff": {"tf": 8.12403840463596}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 12}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}}, "df": 10, "e": {"docs": {"sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 3}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 3}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2.23606797749979}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 34}}, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.7320508075688772}}, "df": 2, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}}, "df": 26, "d": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}}}, "w": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe": {"tf": 3}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.helper.split_num_words": {"tf": 2}}, "df": 3, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 
1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.number": {"tf": 1.4142135623730951}}, "df": 52}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 47, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2}, 
"sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.parser.Parser": {"tf": 2}}, "df": 42}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Star.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Alias.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Func": {"tf": 2.23606797749979}, "sqlglot.expressions.Cast.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 1.7320508075688772}, "sqlglot.expressions.var": {"tf": 1.7320508075688772}, "sqlglot.expressions.rename_table": {"tf": 2}, "sqlglot.expressions.table_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.7320508075688772}, 
"sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}}, "df": 49, "s": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 2}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}}, "df": 37}, "d": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "\u00ef": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 1.7320508075688772}}, "df": 63, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 7, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 
1}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.find_ancestor": {"tf": 1}}, "df": 1}}}}}}, "^": {"2": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}, "docs": {}, "df": 0}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 8, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": 
{"tf": 1.4142135623730951}}, "df": 6}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 6}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}}, "df": 6}}}, "v": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 44}}}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}}, "df": 3, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller": {"tf": 1.4142135623730951}}, "df": 2}, "s": {"docs": {"sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 
1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 22}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 5, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 
2.6457513110645907}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 5.291502622129181}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 5.291502622129181}, "sqlglot.executor.python.Python.Generator": {"tf": 5.291502622129181}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 5.291502622129181}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 2.6457513110645907}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 52, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 25}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 9}, "s": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": 
{"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3.4641016151377544}, "sqlglot.executor.python.Python.Generator": {"tf": 3.4641016151377544}, "sqlglot.generator.Generator": {"tf": 3.4641016151377544}}, "df": 22, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, 
"df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {"sqlglot.expressions.Delete.delete": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 2.23606797749979}}, "df": 4}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}}, "df": 5}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}}, "df": 1}}}}}}}}}}, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.copy": {"tf": 1}}, "df": 1}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.simplify_not": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}}, "df": 5}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "c": 
{"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.dialects": {"tf": 3.1622776601683795}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 66, "s": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 2}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 9}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 11, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}}, "df": 2}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, 
"sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "j": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 2.449489742783178}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 28, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 2}}}, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 2}, "sqlglot.time.format_time": {"tf": 1}}, "df": 35}}}}}}, "|": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}}}}}}}, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}}, "df": 3}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 22}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}}, "df": 6}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 4}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2}}}}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 6, "n": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, 
"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 6, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}}, "df": 4}}}}, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 28, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}}, "df": 5}, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 2}}, "t": {"docs": {"sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 2, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 
3.605551275463989}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}}, "df": 8, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 3}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 8}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 4.58257569495584}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}}, "df": 9}}}}}}, "g": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.Scan.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.23606797749979}}, "df": 5}, "y": {"docs": {"sqlglot.expressions.to_interval": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {"sqlglot.dataframe": {"tf": 2.8284271247461903}}, "df": 1, "s": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 5}}, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "b": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2.449489742783178}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2}}, "df": 12, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 4}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "d": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3.872983346207417}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": {"tf": 2}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 2}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 2}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 2}, "sqlglot.expressions.Join.on": {"tf": 2}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.select": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.output_name": {"tf": 2}, "sqlglot.expressions.Star.output_name": {"tf": 2}, "sqlglot.expressions.Alias.output_name": {"tf": 2}, "sqlglot.expressions.Cast.output_name": {"tf": 2}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 90, "r": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, 
"sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 26, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 21}}}}}, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}}, "df": 29}, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 6}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 8, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}}, "df": 4}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.paren": {"tf": 1}}, "df": 1, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 9, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.paren": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, 
"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}}, "df": 5, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "/": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 43}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 
0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "e": {"docs": {"sqlglot.diff.diff": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, 
"sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 10}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 23, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 25}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 6}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.helper.name_sequence": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions": {"tf": 1}}, "df": 3}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2}, "sqlglot.planner.Scan.from_expression": {"tf": 2}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 7}}}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 12}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 4}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 24}, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, 
"sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 44}}, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 2}}, "df": 2}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.449489742783178}}, 
"df": 1}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 3}}}}}}}, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "t": 
{"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 4.898979485566356}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 29, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}}, "df": 3, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "p": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "f": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 29, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 2}}, "df": 1}}}}}}}}}, "n": {"docs": {"sqlglot.executor": {"tf": 2.8284271247461903}}, "df": 1, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}}, "df": 7}, "y": {"docs": 
{"sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 3}}}}}, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 45, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Delete.returning": {"tf": 1}}, "df": 1, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": 
{"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 2}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.pop": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "h": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, 
"sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 43, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.transpile": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}}, "df": 7, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 12}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 6}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 3}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}}, "df": 5, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.872983346207417}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}}, "df": 75, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}}, "df": 6}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Func": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, 
"sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 1.7320508075688772}}, "df": 69}}, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, 
"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 7}, "i": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2.8284271247461903}, "sqlglot.trie.in_trie": {"tf": 3.4641016151377544}}, "df": 25, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {"sqlglot": {"tf": 4.358898943540674}, "sqlglot.pretty": {"tf": 1}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.transpile": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 3.7416573867739413}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 5.477225575051661}, 
"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 5.477225575051661}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 5.477225575051661}, "sqlglot.diff": {"tf": 8.48528137423857}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 6.708203932499369}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 5.477225575051661}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.set": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.delete": {"tf": 2}, "sqlglot.expressions.Delete.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.returning": {"tf": 2}, "sqlglot.expressions.Insert.with_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Join.on": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.using": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Union.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.order_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.sort_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lateral": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.join": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.having": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 2.23606797749979}, "sqlglot.expressions.Func": {"tf": 2.449489742783178}, "sqlglot.expressions.maybe_parse": {"tf": 2.6457513110645907}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 2.6457513110645907}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, "sqlglot.expressions.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.not_": {"tf": 2.23606797749979}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 2.8284271247461903}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 2.23606797749979}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.7320508075688772}, "sqlglot.expressions.expand": {"tf": 2}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 5.477225575051661}, "sqlglot.helper.csv": {"tf": 1}, 
"sqlglot.helper.subclasses": {"tf": 1.7320508075688772}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 3.3166247903554}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 2}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.Scan.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, 
"sqlglot.schema.nested_get": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 2.449489742783178}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 200, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 2.6457513110645907}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 26}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, 
"sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 23}, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 3}}}}}}}, "o": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "p": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 2.23606797749979}}, "df": 2, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 3}}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 4.69041575982343}, "sqlglot.schema": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 2}, "sqlglot.parse_one": {"tf": 2.6457513110645907}, "sqlglot.transpile": {"tf": 4}, "sqlglot.dataframe": {"tf": 4.358898943540674}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 3}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 2.6457513110645907}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 
4.358898943540674}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 4.358898943540674}, "sqlglot.diff": {"tf": 15.066519173319364}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.diff": {"tf": 3.7416573867739413}, "sqlglot.diff.ChangeDistiller": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 8.48528137423857}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 4.358898943540674}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, 
"sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.set": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.find_all": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.intersect": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.except_": {"tf": 2.449489742783178}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Delete.returning": {"tf": 2.449489742783178}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 3.4641016151377544}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 2.8284271247461903}, "sqlglot.expressions.Join.using": {"tf": 3}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 3.4641016151377544}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Union.select": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.order_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.sort_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.offset": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.select": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.lateral": {"tf": 
2.6457513110645907}, "sqlglot.expressions.Select.join": {"tf": 3.4641016151377544}, "sqlglot.expressions.Select.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.having": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.distinct": {"tf": 2}, "sqlglot.expressions.Select.ctas": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lock": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 2.8284271247461903}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 3.1622776601683795}, "sqlglot.expressions.union": {"tf": 3}, "sqlglot.expressions.intersect": {"tf": 3}, "sqlglot.expressions.except_": {"tf": 3}, "sqlglot.expressions.select": {"tf": 3}, "sqlglot.expressions.from_": {"tf": 3.3166247903554}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.insert": {"tf": 3.1622776601683795}, "sqlglot.expressions.condition": {"tf": 3}, "sqlglot.expressions.and_": {"tf": 2.23606797749979}, "sqlglot.expressions.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.not_": {"tf": 2.449489742783178}, "sqlglot.expressions.paren": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_identifier": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 2}, "sqlglot.expressions.alias_": {"tf": 3}, "sqlglot.expressions.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 2}, "sqlglot.expressions.rename_table": {"tf": 2}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 3}, "sqlglot.generator.Generator": {"tf": 4.358898943540674}, "sqlglot.generator.Generator.generate": {"tf": 2}, "sqlglot.helper.AutoName": {"tf": 1.4142135623730951}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_collection": {"tf": 1.7320508075688772}, "sqlglot.helper.csv": {"tf": 1.7320508075688772}, "sqlglot.helper.subclasses": {"tf": 2.23606797749979}, "sqlglot.helper.apply_index_offset": {"tf": 2.8284271247461903}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2.6457513110645907}, "sqlglot.helper.is_iterable": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 2.449489742783178}, 
"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 2.449489742783178}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}, "sqlglot.optimizer.optimizer.optimize": {"tf": 3}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 2.6457513110645907}, "sqlglot.parser.Parser.parse": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 2.8284271247461903}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2.449489742783178}, "sqlglot.planner.Scan.from_expression": {"tf": 2.449489742783178}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.449489742783178}, "sqlglot.schema.Schema.add_table": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.column_names": {"tf": 2}, "sqlglot.schema.Schema.get_column_type": {"tf": 2.449489742783178}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2.449489742783178}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.column_names": {"tf": 2}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 2.449489742783178}, "sqlglot.schema.nested_get": {"tf": 2.23606797749979}, "sqlglot.schema.nested_set": {"tf": 2.449489742783178}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 3.3166247903554}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": 
{"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 2.6457513110645907}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 2}, "sqlglot.trie.new_trie": {"tf": 2.23606797749979}, "sqlglot.trie.in_trie": {"tf": 2.23606797749979}}, "df": 252, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 2}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 5, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 16}, "m": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 6}, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, 
"sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 30}, "i": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 9}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.diff": {"tf": 5.744562646538029}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.join_hints": 
{"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 70}, "n": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 4}}, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot": {"tf": 4.58257569495584}, "sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, 
"sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 6.244997998398398}, "sqlglot.diff.diff": {"tf": 3.4641016151377544}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 3}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 180}, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 2}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 5}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}}, "df": 3, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": 
{"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 7.280109889280518}, "sqlglot.diff.diff": {"tf": 2.8284271247461903}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 35, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 3.3166247903554}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Table.parts": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 2.6457513110645907}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 2.8284271247461903}, "sqlglot.expressions.to_column": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 2.449489742783178}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 2.449489742783178}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 2.449489742783178}, "sqlglot.schema.Schema.column_names": {"tf": 2}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2.449489742783178}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2.449489742783178}, "sqlglot.schema.MappingSchema.column_names": {"tf": 2}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 90, "s": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, 
"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 22, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 
1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 42}}}}}}, "|": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}}, "df": 2}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 3}, "n": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Tag": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.helper.is_iterable": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, 
"sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 33, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.time.format_time": {"tf": 2.23606797749979}}, "df": 29, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": 
{"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"1": {"docs": {"sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}}, "df": 2}, "2": {"docs": {"sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.8284271247461903}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}}, "df": 4}, "docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.replace": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}}, "df": 31}}, "w": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 5}, "sqlglot.executor": {"tf": 1}}, "df": 5}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 
1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 3.1622776601683795}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.23606797749979}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 2}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 30, "s": {"docs": {"sqlglot.dataframe": {"tf": 5.656854249492381}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 22}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}}}}, "p": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1, "h": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": 
{"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}}, "df": 3}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 4, "[": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.schema.nested_get": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}}, "df": 3, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 10, "r": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.schema": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 8, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 9}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, 
"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 3}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 14, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1}}, "df": 3}}, "[": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, 
"sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 90}}}}, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 
1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}}, "df": 46}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 3}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {"sqlglot": {"tf": 4}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 3.7416573867739413}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 2.6457513110645907}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 2}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.8284271247461903}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2.8284271247461903}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 101, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 22, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 42}}}}}}, "g": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"3": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"4": {"3": {"3": {"9": {"2": {"3": {"0": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.parser.Parser.parse": 
{"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 5}}}}}}}, "f": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.parse": {"tf": 1}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 10.488088481701515}, "sqlglot.diff.diff": {"tf": 2}, 
"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 4.69041575982343}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.expressions": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_children": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 2}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, 
"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.parse": {"tf": 2}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 165, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 2.23606797749979}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 23}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Literal.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Star.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Alias.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Cast.output_name": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}}, "df": 35, "s": {"docs": {"sqlglot.optimizer.scope.Scope.selects": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 7}}}, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}}, "df": 3}}, "n": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 4.795831523312719}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 3}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 
1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 55, "e": {"docs": {"sqlglot": {"tf": 4.242640687119285}, "sqlglot.parse": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 2}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 2}, "sqlglot.expressions.Identifier.output_name": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 2}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 2}, "sqlglot.expressions.Star.output_name": {"tf": 2}, "sqlglot.expressions.Alias.output_name": {"tf": 2}, "sqlglot.expressions.Cast.output_name": {"tf": 2}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, 
"sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 64, "s": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 3}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": 
{"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 72}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 3}}, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.expressions.Select.distinct": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 13, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 6}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, 
"sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 50, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 22}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 5, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "w": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.insert": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 2}}, "df": 1}, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}}, "df": 2}}}, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.schema": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 11, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 4}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 2}}, "df": 3}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.6457513110645907}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2.6457513110645907}, "sqlglot.generator.Generator": {"tf": 2.6457513110645907}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 26, 
"i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 23}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}, "s": {"docs": {"sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 1}}, "v": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 6}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 4}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 4}}}, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 12}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "x": {"docs": 
{"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1, "p": {"docs": {"sqlglot": {"tf": 4.123105625617661}, "sqlglot.dialects": {"tf": 3.3166247903554}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 19, "r": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 3.4641016151377544}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 2}, "sqlglot.expressions.Expression.pop": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2.23606797749979}, 
"sqlglot.expressions.Unionable.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.where": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.returning": {"tf": 2.23606797749979}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 2.6457513110645907}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 2.23606797749979}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.6457513110645907}, "sqlglot.expressions.Union.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.order_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.sort_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.offset": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.having": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.distinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 2}, "sqlglot.expressions.Select.lock": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 3}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 2.6457513110645907}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.paren": {"tf": 2.23606797749979}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 2.449489742783178}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 2}, "sqlglot.expressions.replace_placeholders": {"tf": 2}, "sqlglot.expressions.expand": {"tf": 2.23606797749979}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 2}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.8284271247461903}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 2.8284271247461903}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2.8284271247461903}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 3}, "sqlglot.optimizer.normalize.normalize": {"tf": 2.449489742783178}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 2.449489742783178}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope": {"tf": 2}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 2}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 3}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 2}, "sqlglot.optimizer.simplify.simplify": {"tf": 2.6457513110645907}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.6457513110645907}, "sqlglot.parser.Parser.parse_into": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.expression": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.validate_expression": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 2.6457513110645907}, "sqlglot.planner.Scan.from_expression": {"tf": 2.6457513110645907}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.6457513110645907}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 153, "s": {"docs": {"sqlglot": {"tf": 
2.6457513110645907}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.using": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lateral": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 2.449489742783178}, 
"sqlglot.expressions.Select.having": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 2.23606797749979}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 2}, "sqlglot.expressions.or_": {"tf": 2}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 95}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.iter_expressions": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}}, "df": 3, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, 
"sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 42}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.expand": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, 
"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, 
"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 94, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 17}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 7}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 7}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": 
{"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}}, "df": 21}}}}}, "t": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 7}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.449489742783178}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 2.449489742783178}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 30, "s": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, 
"sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 29}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 42}}}}}}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, 
"sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 26}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 4.123105625617661}}, "df": 1, "s": {"docs": {"sqlglot.dataframe": {"tf": 3}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}}, "df": 10}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 1, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}}, "df": 23}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 
2}}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.helper.first": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}}, "df": 2}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}}}}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 11}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 3.872983346207417}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 3}}, "df": 1}}}, "u": {"docs": {}, "df": 0, ":": {"8": {"0": {"9": {"0": {"docs": {}, "df": 0, "/": {"1": {"1": {"5": {"docs": {}, "df": 0, "/": {"1": {"docs": {}, "df": 0, "/": {"1": {"9": {"9": {"5": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2, "c": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 3}}, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}, "c": {"docs": {"sqlglot": {"tf": 3.605551275463989}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.flatten": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 22, "a": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1, "n": {"docs": {"sqlglot": {"tf": 3.872983346207417}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 4.242640687119285}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 51, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 2.449489742783178}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_list": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.helper.ensure_list": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, 
"sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}}, "df": 13, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 4}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 4}, "r": {"docs": {"sqlglot.diff.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 2}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.diff.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}}, "df": 3, "d": 
{"docs": {"sqlglot.generator.cached_generator": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}, "t": {"docs": {"sqlglot.trie.in_trie": {"tf": 2}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}}, "df": 11}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.func": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects": {"tf": 1}, 
"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 16}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 3}}}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}}, "df": 1, "d": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}}, "df": 1}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}}, "df": 4, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "x": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, 
"t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 3.7416573867739413}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}}, "df": 26}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}}, "df": 14, "l": {"docs": 
{}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 22, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}}, "df": 5}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, 
"sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 43}}, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"8": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 
1}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2}, "sqlglot.planner.Scan.from_expression": {"tf": 2}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2}}, "df": 29}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, 
"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 24, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}}, "df": 6}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 2}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 10, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 4}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Join.using": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 23}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 21}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 
1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 12, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 9}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 4}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.convert": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}}}}}, "j": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 2}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, 
"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 72}}, "l": {"1": {"docs": {"sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}}, "df": 3}, "2": {"docs": {"sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}}, "df": 3}, "docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 10, "a": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.6457513110645907}}, "df": 3}, "b": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2}}, "df": 2}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 3.3166247903554}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 2.449489742783178}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 2}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 2}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 63, "s": {"docs": {"sqlglot": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 28}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.execute": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_collection": {"tf": 1.4142135623730951}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}}, "df": 1}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}}, "df": 6, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.23606797749979}, 
"sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.4142135623730951}}, "df": 42}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 27, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 8, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, 
"sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 7, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 14}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3}, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3.7416573867739413}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 
3.7416573867739413}, "sqlglot.generator.Generator": {"tf": 3.7416573867739413}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 25, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 44}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "n": {"docs": {"sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 9}}, "x": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Select.ctas": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 9, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.helper.object_to_dict": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}}}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 
1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 25}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}}, "df": 5}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 2}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 2}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 2}}, "df": 23, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 
1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 4}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 2}}, "df": 2, "s": {"docs": {"sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Select.cluster_by": {"tf": 2}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 2}}, "df": 1, "v": {"docs": {"sqlglot.helper.csv": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 2}}, "df": 2}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 2, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {"sqlglot": {"tf": 4.795831523312719}, "sqlglot.diff": {"tf": 3.7416573867739413}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.helper.dict_depth": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 3.1622776601683795}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}}, "df": 36, "e": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 4.242640687119285}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": 
{"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 111, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, 
"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 6}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 3}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 2}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 24}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": 
{"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 24}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 27}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}}, "df": 6}}, "y": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 
1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.449489742783178}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 3.872983346207417}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}}, "df": 22}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 16, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "s": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 
1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 10}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "t": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 6}}, "y": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.pretty": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 4.47213595499958}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, 
"sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}}, "df": 59, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}}, "df": 23, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "z": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}}, "df": 3}, "r": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 2}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}}, "df": 5, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "i": {"docs": 
{}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 15, "d": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 6}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.6457513110645907}, "sqlglot.executor.python.Python.Generator": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.6457513110645907}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 
1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 35, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 4}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 3}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 9, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.not_": {"tf": 1.4142135623730951}}, "df": 1}}, "b": {"docs": 
{"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 7}}}, "u": {"docs": {"sqlglot.executor": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}}, "df": 2, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3.605551275463989}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 3.605551275463989}, "sqlglot.expressions.Condition.and_": {"tf": 1}, 
"sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 3.605551275463989}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 97, "d": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.schema": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": 
{"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 86}, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 4}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 6}}}, "r": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 11}}}, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, 
"sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 26, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}}, "df": 23, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, 
"sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}}, "df": 3}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 3}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}}, "df": 1}, "/": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {"sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 23}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 4}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.7320508075688772}}, "df": 3}}}}}, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 5, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 23, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.should_identify": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dataframe": {"tf": 4.69041575982343}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 3.4641016151377544}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 6.855654600401044}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 
1}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 2.23606797749979}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 3}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 89, "m": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}}, "df": 10, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, 
"sqlglot.pretty": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.7320508075688772}}, "df": 27, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": 
{"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.csv": {"tf": 1}}, "df": 24}}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 3}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 12}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 3}}}}, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "o": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 14}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": 
{}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "/": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot": {"tf": 6.557438524302}, "sqlglot.dataframe": {"tf": 4.58257569495584}, "sqlglot.dialects": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 4.358898943540674}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_placeholders": {"tf": 1.7320508075688772}, "sqlglot.expressions.expand": {"tf": 3.3166247903554}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.8284271247461903}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, 
"sqlglot.parser.Parser": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.Scan.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 121, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}}, "df": 8, "/": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 14}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 
1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}}, "df": 1}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 4, "c": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.func": {"tf": 2}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 5, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 2}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, 
"sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 35, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 34}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}}, "df": 3, "y": {"docs": {"sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"6": {"4": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1.7320508075688772}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 
1.7320508075688772}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "\u00e9": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.flatten": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 5}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.flatten": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 7}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 3.4641016151377544}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, 
"sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 83}}, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": 
{"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 9}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 24, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, 
"sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 23}, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.lineage": {"tf": 2}}, "df": 1}}}}, "k": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2.8284271247461903}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.8284271247461903}}, "df": 24}}}}}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 2}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 2}, "sqlglot.helper.ensure_collection": {"tf": 1.4142135623730951}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 3}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 2}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 57, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 2}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, 
"x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 11}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.limit": {"tf": 2.23606797749979}}, "df": 4, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Union.limit": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "k": {"docs": {"sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {"sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 1}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 23, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.should_identify": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 3}, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}, "p": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}}, "df": 10}}, "v": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 47}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 2}}}}}}}, "a": {"docs": {}, 
"df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 22}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.7320508075688772}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 5.196152422706632}}, "df": 1}}}, "f": {"docs": {"sqlglot.diff": {"tf": 6.324555320336759}, "sqlglot.expressions.Expression.text": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 4}}}, "n": {"docs": {"sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 25}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "x": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 9, "/": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "a": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 4}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": 
{"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 45, "r": {"docs": {"sqlglot.expressions.condition": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 44}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "z": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"sqlglot.optimizer.simplify.simplify_not": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1.4142135623730951}}, "df": 8, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1.4142135623730951}}, "df": 1}}, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, 
"df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 4, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}}, "df": 4}, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 4, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 8}, "d": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 6}, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 3}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}}, "df": 5, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 6}}}}}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 12, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, 
"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, 
"sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, 
"sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 178}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 39}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Delete.returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}}, "df": 3}}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 
1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 13}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3}}, "s": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 26}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, 
"sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 4}}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 10, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}}, "df": 2}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 3}, "s": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 13, "d": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 4}, "s": {"docs": {"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": 
{"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 45}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, 
"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 10, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 6}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}}, "df": 11}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.flatten": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 
0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "t": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 5}}, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 
1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 46, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 5}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": 
{"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 42}}}}, "w": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 25}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2}}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 6, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, 
"sqlglot.diff": {"tf": 2.449489742783178}}, "df": 3}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "\u00e9": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 4}}}}}, "w": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 
1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 4.58257569495584}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 70, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 5}}}}, "l": 
{"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 83}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 5}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3.605551275463989}, "sqlglot.diff": {"tf": 4}, 
"sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 3.605551275463989}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 3.605551275463989}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 53}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, 
"sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 53}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 3}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 19}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, 
"sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 59}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot.executor": {"tf": 2}}, "df": 1}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 5}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2}}, "df": 4}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {"sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}}, "df": 3, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": 
{"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}}, "df": 12}}}, "s": {"docs": {"sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 23}, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 4, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 6, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 5.5677643628300215}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, 
"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 15, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 3}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}}, "df": 3}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "\u2019": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 5, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "k": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.split_num_words": {"tf": 2.8284271247461903}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.var": {"tf": 2.449489742783178}, "sqlglot.tokens.Token.var": {"tf": 1.4142135623730951}}, "df": 4, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, 
"df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 2.23606797749979}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.set": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 2.23606797749979}, "sqlglot.helper.ensure_collection": {"tf": 2.23606797749979}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.is_iterable": {"tf": 2}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_set": {"tf": 3.1622776601683795}, "sqlglot.trie.in_trie": {"tf": 1.7320508075688772}}, "df": 17, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 2.6457513110645907}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 10}}}}}, "e": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 4}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 
2.23606797749979}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.executor.context.Context": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20}, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}}, "df": 3}}}, "\u00e4": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, 
"a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 2.23606797749979}, "sqlglot.schema.AbstractMappingSchema": {"tf": 2.23606797749979}}, "df": 2}}, "g": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 7, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.7320508075688772}}, "df": 5, "[": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 40}}, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3, "d": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tag": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 16, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 47}}}, "t": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 5.656854249492381}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.replace": {"tf": 3}, "sqlglot.expressions.Expression.assert_is": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Predicate": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": 
{"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.intersect": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.except_": {"tf": 2.449489742783178}, "sqlglot.expressions.Column.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Delete.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Join.on": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.using": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 3}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.distinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lock": {"tf": 2.449489742783178}, "sqlglot.expressions.Subquery.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Star.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Cast.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.maybe_parse": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 3.4641016151377544}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.paren": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 2.449489742783178}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.expressions.var": {"tf": 2.449489742783178}, "sqlglot.expressions.column_table_names": {"tf": 2.449489742783178}, "sqlglot.expressions.table_name": {"tf": 2.449489742783178}, 
"sqlglot.expressions.replace_tables": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_placeholders": {"tf": 2.449489742783178}, "sqlglot.expressions.expand": {"tf": 3}, "sqlglot.expressions.func": {"tf": 2.449489742783178}, "sqlglot.helper.split_num_words": {"tf": 3}, "sqlglot.helper.is_iterable": {"tf": 2.449489742783178}, "sqlglot.helper.flatten": {"tf": 2.449489742783178}, "sqlglot.helper.dict_depth": {"tf": 3.872983346207417}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 4}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 3.4641016151377544}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 3.4641016151377544}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3.872983346207417}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 3}, "sqlglot.optimizer.normalize.normalize": {"tf": 3}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 3}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 3}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2.449489742783178}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 3.4641016151377544}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 3.4641016151377544}, "sqlglot.optimizer.qualify.qualify": {"tf": 3.4641016151377544}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 3.4641016151377544}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 3.872983346207417}, "sqlglot.optimizer.simplify.simplify": {"tf": 3}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 3}, "sqlglot.schema.nested_set": {"tf": 2.449489742783178}, "sqlglot.time.format_time": {"tf": 1.7320508075688772}, "sqlglot.transforms.unalias_group": {"tf": 2.449489742783178}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 3}}, "df": 101}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 2}}, "df": 13, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 3}}, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, 
"df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "z": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 7}, "d": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 3.7416573867739413}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}, 
"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 29, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 14.594519519326424}, "sqlglot.dataframe": {"tf": 10.392304845413264}, "sqlglot.dialects": {"tf": 5.385164807134504}, "sqlglot.diff": {"tf": 5.830951894845301}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.assert_is": {"tf": 2}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Condition.or_": {"tf": 2}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.returning": {"tf": 2.449489742783178}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Insert.with_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Literal.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.on": {"tf": 2}, "sqlglot.expressions.Join.using": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Union.select": {"tf": 2}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.order_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.sort_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2}, "sqlglot.expressions.Select.lateral": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.join": {"tf": 5.477225575051661}, "sqlglot.expressions.Select.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.having": {"tf": 3.1622776601683795}, "sqlglot.expressions.Select.distinct": {"tf": 2}, "sqlglot.expressions.Select.ctas": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lock": {"tf": 4.47213595499958}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Star.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Alias.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Cast.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 2.449489742783178}, "sqlglot.expressions.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.update": {"tf": 4}, 
"sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.insert": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 3.1622776601683795}, "sqlglot.expressions.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_placeholders": {"tf": 3.1622776601683795}, "sqlglot.expressions.expand": {"tf": 4}, "sqlglot.expressions.func": {"tf": 2.449489742783178}, "sqlglot.helper.split_num_words": {"tf": 3.4641016151377544}, "sqlglot.helper.is_iterable": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.helper.dict_depth": {"tf": 3.1622776601683795}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 4.242640687119285}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.8284271247461903}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_set": {"tf": 4.242640687119285}, "sqlglot.time.format_time": {"tf": 2.449489742783178}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2.449489742783178}, "sqlglot.trie.in_trie": {"tf": 3.4641016151377544}}, "df": 94, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, 
"sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}}, "df": 26, "d": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1.4142135623730951}}, "df": 7}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 26}}}, "k": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.7320508075688772}}, "df": 7}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "d": {"docs": 
{"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}}, "df": 5}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "y": {"docs": {"sqlglot": {"tf": 3.3166247903554}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Predicate": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 3.1622776601683795}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.8284271247461903}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.23606797749979}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.8284271247461903}, "sqlglot.planner.Step.from_expression": {"tf": 2.8284271247461903}, "sqlglot.planner.Scan.from_expression": {"tf": 2.8284271247461903}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.8284271247461903}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}}, "df": 40, "o": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 2}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 8, "r": {"docs": {"sqlglot.dataframe": 
{"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}}}, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "y": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.time.format_time": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 9}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 11}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 3.7416573867739413}}, "df": 1, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, 
"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 5, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "q": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.condition": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 4}}}}}, "x": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, 
"sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}}, "df": 22}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 4.69041575982343}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 10, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 2}, "sqlglot.expressions.Select.having": {"tf": 1.7320508075688772}}, "df": 2}}}}, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 19, "n": {"docs": {"sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "\u2019": {"docs": {}, "df": 0, "t": 
{"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}}, "df": 3, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 2}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "f": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, ":": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}}}}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1}}}}}}}, ":": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 4}}}}, "m": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 
1.7320508075688772}}, "df": 3, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 9, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}}, "df": 1}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}}, "df": 5}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.schema.nested_set": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}, "p": {"docs": {"sqlglot.expressions.Func": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, 
"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}}, "df": 40, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 23}, "[": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}}, "df": 6}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, 
"df": 1, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}}, "df": 7, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}}, "df": 3}, "d": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 7.14142842854285}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 3}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "h": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "x": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": 
{"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 45, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 42}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 2, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": 
{"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 48}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 20, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, 
"sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 46}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}}, "df": 1, "s": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}}, "df": 26}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, 
"d": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}}, "df": 23}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 4}}}, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 24, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.expressions.to_interval": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.diff": 
{"tf": 1.4142135623730951}}, "df": 1}}}, "z": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 8}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 4, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "x": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 
1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 7}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "e": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 11}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}}, "df": 4, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 9}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}}, "df": 1}}}}}, "j": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.on": {"tf": 2}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 3.872983346207417}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 2.23606797749979}, 
"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}}, "df": 18, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}}, "df": 8}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Select.join": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 2.23606797749979}}, "df": 5}}, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}}, "x": {"docs": {"sqlglot": {"tf": 4.898979485566356}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 4.358898943540674}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Predicate": {"tf": 
1.7320508075688772}, "sqlglot.expressions.Delete.where": {"tf": 2}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2.8284271247461903}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 2}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 2}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2.23606797749979}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2.23606797749979}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.8284271247461903}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.449489742783178}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.449489742783178}, "sqlglot.planner.Step.from_expression": {"tf": 4}, "sqlglot.planner.Scan.from_expression": {"tf": 4}, "sqlglot.planner.SetOperation.from_expression": {"tf": 4}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}}, "df": 62, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "z": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.expressions.Expression.assert_is": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}}, "df": 15, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 4}}, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.set": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 2}, 
"sqlglot.schema.nested_set": {"tf": 3.4641016151377544}, "sqlglot.trie.in_trie": {"tf": 2}}, "df": 38, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2}}, "df": 3}}}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}}, "df": 26}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.diff.diff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 2}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": 
{"sqlglot.expressions.Kwarg": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 6}}}}}}}}}, "pipeline": ["trimmer"], "_isPrebuiltIndex": true}; + /** pdoc search index */const docs = {"version": "0.9.5", "fields": ["qualname", "fullname", "annotation", "default_value", "signature", "bases", "doc"], "ref": "fullname", "documentStore": {"docs": {"sqlglot": {"fullname": "sqlglot", "modulename": "sqlglot", "kind": "module", "doc": "

\"SQLGlot

\n\n

SQLGlot is a no-dependency SQL parser, transpiler, optimizer, and engine. It can be used to format SQL or translate between 19 different dialects like DuckDB, Presto, Spark, Snowflake, and BigQuery. It aims to read a wide variety of SQL inputs and output syntactically correct SQL in the targeted dialects.

\n\n

It is a very comprehensive generic SQL parser with a robust test suite. It is also quite performant, while being written purely in Python.

\n\n

You can easily customize the parser, analyze queries, traverse expression trees, and programmatically build SQL.

\n\n

Syntax errors are highlighted, and dialect incompatibilities can be configured to warn or raise. However, SQL validation is not SQLGlot\u2019s goal, so some syntax errors may go unnoticed.

\n\n
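
A minimal sketch of those configuration knobs (assuming the parser accepts an error_level option taking sqlglot.errors.ErrorLevel values; the generator's unsupported_level plays the analogous role for dialect incompatibilities, as shown in the Unsupported Errors section below):

\n\n
\n
import sqlglot\nfrom sqlglot.errors import ErrorLevel, ParseError\n\n# error_level controls how strictly the parser reports syntax problems;\n# ErrorLevel.RAISE collects the errors and raises them as a ParseError.\ntry:\n    sqlglot.parse_one("SELECT foo( FROM bar", error_level=ErrorLevel.RAISE)\nexcept ParseError as e:\n    print(e.errors)\n
\n
\n\n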

Learn more about the SQLGlot API in the documentation.

\n\n

Contributions are very welcome in SQLGlot; read the contribution guide to get started!

\n\n

Table of Contents

\n\n\n\n

Install

\n\n

From PyPI:

\n\n
pip3 install sqlglot\n
\n\n

Or with a local checkout:

\n\n
make install\n
\n\n

Requirements for development (optional):

\n\n
make install-dev\n
\n\n

Versioning

\n\n

Given a version number MAJOR.MINOR.PATCH, SQLGlot uses the following versioning strategy:

\n\n
    \n
  • The PATCH version is incremented when there are backwards-compatible fixes or feature additions.
  • \n
  • The MINOR version is incremented when there are backwards-incompatible fixes or feature additions.
  • \n
  • The MAJOR version is incremented when there are significant backwards-incompatible fixes or feature additions.
  • \n
\n\n

Get in Touch

\n\n

We'd love to hear from you. Join our community Slack channel!

\n\n

Examples

\n\n

Formatting and Transpiling

\n\n

Easily translate from one dialect to another. For example, date/time functions vary across dialects and can be hard to deal with:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT EPOCH_MS(1618088028295)", read="duckdb", write="hive")[0]\n
\n
\n\n
\n
'SELECT FROM_UNIXTIME(1618088028295 / 1000)'\n
\n
\n\n

SQLGlot can even translate custom time formats:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT STRFTIME(x, '%y-%-m-%S')", read="duckdb", write="hive")[0]\n
\n
\n\n
\n
"SELECT DATE_FORMAT(x, 'yy-M-ss')"\n
\n
\n\n

As another example, let's suppose that we want to read in a SQL query that contains a CTE and a cast to REAL, and then transpile it to Spark, which uses backticks for identifiers and FLOAT instead of REAL:

\n\n
\n
import sqlglot\n\nsql = """WITH baz AS (SELECT a, c FROM foo WHERE a = 1) SELECT f.a, b.b, baz.c, CAST("b"."a" AS REAL) d FROM foo f JOIN bar b ON f.a = b.a LEFT JOIN baz ON f.a = baz.a"""\nprint(sqlglot.transpile(sql, write="spark", identify=True, pretty=True)[0])\n
\n
\n\n
\n
WITH `baz` AS (\n  SELECT\n    `a`,\n    `c`\n  FROM `foo`\n  WHERE\n    `a` = 1\n)\nSELECT\n  `f`.`a`,\n  `b`.`b`,\n  `baz`.`c`,\n  CAST(`b`.`a` AS FLOAT) AS `d`\nFROM `foo` AS `f`\nJOIN `bar` AS `b`\n  ON `f`.`a` = `b`.`a`\nLEFT JOIN `baz`\n  ON `f`.`a` = `baz`.`a`\n
\n
\n\n

Comments are also preserved on a best-effort basis when transpiling SQL code:

\n\n
\n
sql = """\n/* multi\n   line\n   comment\n*/\nSELECT\n  tbl.cola /* comment 1 */ + tbl.colb /* comment 2 */,\n  CAST(x AS INT), # comment 3\n  y               -- comment 4\nFROM\n  bar /* comment 5 */,\n  tbl #          comment 6\n"""\n\nprint(sqlglot.transpile(sql, read='mysql', pretty=True)[0])\n
\n
\n\n
\n
/* multi\n   line\n   comment\n*/\nSELECT\n  tbl.cola /* comment 1 */ + tbl.colb /* comment 2 */,\n  CAST(x AS INT), /* comment 3 */\n  y /* comment 4 */\nFROM bar /* comment 5 */, tbl /*          comment 6 */\n
\n
\n\n

Metadata

\n\n

You can explore SQL with expression helpers to do things like find columns and tables:

\n\n
\n
from sqlglot import parse_one, exp\n\n# print all column references (a and b)\nfor column in parse_one("SELECT a, b + 1 AS c FROM d").find_all(exp.Column):\n    print(column.alias_or_name)\n\n# find all projections in select statements (a and c)\nfor select in parse_one("SELECT a, b + 1 AS c FROM d").find_all(exp.Select):\n    for projection in select.expressions:\n        print(projection.alias_or_name)\n\n# find all tables (x, y, z)\nfor table in parse_one("SELECT * FROM x JOIN y JOIN z").find_all(exp.Table):\n    print(table.name)\n
\n
\n\n

Parser Errors

\n\n

When the parser detects an error in the syntax, it raises a ParseError:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT foo( FROM bar")\n
\n
\n\n
sqlglot.errors.ParseError: Expecting ). Line 1, Col: 13.\n  select foo( FROM bar\n              ~~~~\n
\n\n

Structured syntax errors are accessible for programmatic use:

\n\n
\n
import sqlglot\ntry:\n    sqlglot.transpile("SELECT foo( FROM bar")\nexcept sqlglot.errors.ParseError as e:\n    print(e.errors)\n
\n
\n\n
\n
[{\n  'description': 'Expecting )',\n  'line': 1,\n  'col': 13,\n  'start_context': 'SELECT foo( ',\n  'highlight': 'FROM',\n  'end_context': ' bar'\n}]\n
\n
\n\n

Unsupported Errors

\n\n

Presto's APPROX_DISTINCT supports the accuracy argument, which is not supported in Hive:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT APPROX_DISTINCT(a, 0.1) FROM foo", read="presto", write="hive")\n
\n
\n\n
\n
APPROX_COUNT_DISTINCT does not support accuracy\n'SELECT APPROX_COUNT_DISTINCT(a) FROM foo'\n
\n
\n\n
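
That message is only a warning by default. A hedged sketch (assuming the generator accepts an unsupported_level option taking sqlglot.errors.ErrorLevel values and raises sqlglot.errors.UnsupportedError) of escalating it to an exception:

\n\n
\n
import sqlglot\nfrom sqlglot.errors import ErrorLevel, UnsupportedError\n\n# unsupported_level=ErrorLevel.RAISE is assumed to turn the warning above into an error\ntry:\n    sqlglot.transpile("SELECT APPROX_DISTINCT(a, 0.1) FROM foo", read="presto", write="hive", unsupported_level=ErrorLevel.RAISE)\nexcept UnsupportedError as e:\n    print(e)\n
\n
\n\n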

Build and Modify SQL

\n\n

SQLGlot supports incrementally building SQL expressions:

\n\n
\n
from sqlglot import select, condition\n\nwhere = condition("x=1").and_("y=1")\nselect("*").from_("y").where(where).sql()\n
\n
\n\n
\n
'SELECT * FROM y WHERE x = 1 AND y = 1'\n
\n
\n\n

You can also modify a parsed tree:

\n\n
\n
from sqlglot import parse_one\nparse_one("SELECT x FROM y").from_("z").sql()\n
\n
\n\n
\n
'SELECT x FROM z'\n
\n
\n\n

There is also a way to recursively transform the parsed tree by applying a mapping function to each tree node:

\n\n
\n
from sqlglot import exp, parse_one\n\nexpression_tree = parse_one("SELECT a FROM x")\n\ndef transformer(node):\n    if isinstance(node, exp.Column) and node.name == "a":\n        return parse_one("FUN(a)")\n    return node\n\ntransformed_tree = expression_tree.transform(transformer)\ntransformed_tree.sql()\n
\n
\n\n
\n
'SELECT FUN(a) FROM x'\n
\n
\n\n

SQL Optimizer

\n\n

SQLGlot can rewrite queries into an \"optimized\" form. It applies a variety of techniques to create a new canonical AST. This AST can be used to standardize queries or to provide the foundations for implementing an actual engine. For example:

\n\n
\n
import sqlglot\nfrom sqlglot.optimizer import optimize\n\nprint(\n    optimize(\n        sqlglot.parse_one("""\n            SELECT A OR (B OR (C AND D))\n            FROM x\n            WHERE Z = date '2021-01-01' + INTERVAL '1' month OR 1 = 0\n        """),\n        schema={"x": {"A": "INT", "B": "INT", "C": "INT", "D": "INT", "Z": "STRING"}}\n    ).sql(pretty=True)\n)\n
\n
\n\n
\n
SELECT\n  (\n    "x"."a" <> 0 OR "x"."b" <> 0 OR "x"."c" <> 0\n  )\n  AND (\n    "x"."a" <> 0 OR "x"."b" <> 0 OR "x"."d" <> 0\n  ) AS "_col_0"\nFROM "x" AS "x"\nWHERE\n  CAST("x"."z" AS DATE) = CAST('2021-02-01' AS DATE)\n
\n
\n\n

AST Introspection

\n\n

You can see the AST version of the parsed SQL by calling repr:

\n\n
\n
from sqlglot import parse_one\nprint(repr(parse_one("SELECT a + 1 AS z")))\n
\n
\n\n
\n
(SELECT expressions:\n  (ALIAS this:\n    (ADD this:\n      (COLUMN this:\n        (IDENTIFIER this: a, quoted: False)), expression:\n      (LITERAL this: 1, is_string: False)), alias:\n    (IDENTIFIER this: z, quoted: False)))\n
\n
\n\n

AST Diff

\n\n

SQLGlot can calculate the difference between two expressions and output the changes as a sequence of actions needed to transform a source expression into a target one:

\n\n
\n
from sqlglot import diff, parse_one\ndiff(parse_one("SELECT a + b, c, d"), parse_one("SELECT c, a - b, d"))\n
\n
\n\n
\n
[\n  Remove(expression=(ADD this:\n    (COLUMN this:\n      (IDENTIFIER this: a, quoted: False)), expression:\n    (COLUMN this:\n      (IDENTIFIER this: b, quoted: False)))),\n  Insert(expression=(SUB this:\n    (COLUMN this:\n      (IDENTIFIER this: a, quoted: False)), expression:\n    (COLUMN this:\n      (IDENTIFIER this: b, quoted: False)))),\n  Move(expression=(COLUMN this:\n    (IDENTIFIER this: c, quoted: False))),\n  Keep(source=(IDENTIFIER this: b, quoted: False), target=(IDENTIFIER this: b, quoted: False)),\n  ...\n]\n
\n
\n\n

See also: Semantic Diff for SQL.

\n\n

Custom Dialects

\n\n

Dialects can be added by subclassing Dialect:

\n\n
\n
from sqlglot import exp\nfrom sqlglot.dialects.dialect import Dialect\nfrom sqlglot.generator import Generator\nfrom sqlglot.tokens import Tokenizer, TokenType\n\n\nclass Custom(Dialect):\n    class Tokenizer(Tokenizer):\n        QUOTES = ["'", '"']\n        IDENTIFIERS = ["`"]\n\n        KEYWORDS = {\n            **Tokenizer.KEYWORDS,\n            "INT64": TokenType.BIGINT,\n            "FLOAT64": TokenType.DOUBLE,\n        }\n\n    class Generator(Generator):\n        TRANSFORMS = {exp.Array: lambda self, e: f"[{self.expressions(e)}]"}\n\n        TYPE_MAPPING = {\n            exp.DataType.Type.TINYINT: "INT64",\n            exp.DataType.Type.SMALLINT: "INT64",\n            exp.DataType.Type.INT: "INT64",\n            exp.DataType.Type.BIGINT: "INT64",\n            exp.DataType.Type.DECIMAL: "NUMERIC",\n            exp.DataType.Type.FLOAT: "FLOAT64",\n            exp.DataType.Type.DOUBLE: "FLOAT64",\n            exp.DataType.Type.BOOLEAN: "BOOL",\n            exp.DataType.Type.TEXT: "STRING",\n        }\n\nprint(Dialect["custom"])\n
\n
\n\n
<class '__main__.Custom'>\n
\n\n

SQL Execution

\n\n

One can even interpret SQL queries using SQLGlot, where the tables are represented as Python dictionaries. Although the engine is not very fast (it's not supposed to be) and is in a relatively early stage of development, it can be useful for unit testing and running SQL natively across Python objects. Additionally, the foundation can be easily integrated with fast compute kernels (arrow, pandas). Below is an example showcasing the execution of a SELECT expression that involves aggregations and JOINs:

\n\n
\n
from sqlglot.executor import execute\n\ntables = {\n    "sushi": [\n        {"id": 1, "price": 1.0},\n        {"id": 2, "price": 2.0},\n        {"id": 3, "price": 3.0},\n    ],\n    "order_items": [\n        {"sushi_id": 1, "order_id": 1},\n        {"sushi_id": 1, "order_id": 1},\n        {"sushi_id": 2, "order_id": 1},\n        {"sushi_id": 3, "order_id": 2},\n    ],\n    "orders": [\n        {"id": 1, "user_id": 1},\n        {"id": 2, "user_id": 2},\n    ],\n}\n\nexecute(\n    """\n    SELECT\n      o.user_id,\n      SUM(s.price) AS price\n    FROM orders o\n    JOIN order_items i\n      ON o.id = i.order_id\n    JOIN sushi s\n      ON i.sushi_id = s.id\n    GROUP BY o.user_id\n    """,\n    tables=tables\n)\n
\n
\n\n
\n
user_id price\n      1   4.0\n      2   3.0\n
\n
\n\n

See also: Writing a Python SQL engine from scratch.

\n\n

Used By

\n\n\n\n

Documentation

\n\n

SQLGlot uses pdoc to serve its API documentation.

\n\n

A hosted version is on the SQLGlot website, or you can build locally with:

\n\n
make docs-serve\n
\n\n

Run Tests and Lint

\n\n
make check  # Set SKIP_INTEGRATION=1 to skip integration tests\n
\n\n

Benchmarks

\n\n

Benchmarks were run on Python 3.10.5; times are in seconds.

\n\n

| Query | sqlglot       | sqlfluff        | sqltree         | sqlparse        | moz_sql_parser  | sqloxide        |
| ----- | ------------- | --------------- | --------------- | --------------- | --------------- | --------------- |
| tpch  | 0.01308 (1.0) | 1.60626 (122.7) | 0.01168 (0.893) | 0.04958 (3.791) | 0.08543 (6.531) | 0.00136 (0.104) |
| short | 0.00109 (1.0) | 0.14134 (129.2) | 0.00099 (0.906) | 0.00342 (3.131) | 0.00652 (5.970) | 8.76E-5 (0.080) |
| long  | 0.01399 (1.0) | 2.12632 (151.9) | 0.01126 (0.805) | 0.04410 (3.151) | 0.06671 (4.767) | 0.00107 (0.076) |
| crazy | 0.03969 (1.0) | 24.3777 (614.1) | 0.03917 (0.987) | 11.7043 (294.8) | 1.03280 (26.02) | 0.00625 (0.157) |
\n\n

Optional Dependencies

\n\n

SQLGlot uses dateutil to simplify literal timedelta expressions. The optimizer will not simplify expressions like the following if the module cannot be found:

\n\n
\n
x + interval '1' month\n
\n
\n\n
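
For instance, with dateutil installed, the simplify rule can fold literal date arithmetic (a hedged sketch; sqlglot.optimizer.simplify.simplify is assumed to be the rule involved):

\n\n
\n
import sqlglot\nfrom sqlglot.optimizer.simplify import simplify\n\n# with dateutil available this is expected to fold into a single date literal,\n# e.g. CAST('2021-02-01' AS DATE); without it the expression is left as is\nexpr = sqlglot.parse_one("SELECT DATE '2021-01-01' + INTERVAL '1' month")\nprint(simplify(expr).sql())\n
\n
\n\n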
\n"}, "sqlglot.pretty": {"fullname": "sqlglot.pretty", "modulename": "sqlglot", "qualname": "pretty", "kind": "variable", "doc": "

Whether to format generated SQL by default.
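
\n\n

For example (a hedged sketch of toggling this module-level flag):

\n\n
\n
import sqlglot\n\n# with the flag set, .sql() pretty-prints unless pretty=False is passed explicitly\nsqlglot.pretty = True\nprint(sqlglot.parse_one("SELECT a FROM t WHERE b = 1").sql())\n
\n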

\n", "default_value": "False"}, "sqlglot.schema": {"fullname": "sqlglot.schema", "modulename": "sqlglot.schema", "kind": "module", "doc": "

\n"}, "sqlglot.parse": {"fullname": "sqlglot.parse", "modulename": "sqlglot", "qualname": "parse", "kind": "function", "doc": "

Parses the given SQL string into a collection of syntax trees, one per parsed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • sql: the SQL code string to parse.
  • \n
  • read: the SQL dialect to apply during parsing (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • **opts: other sqlglot.parser.Parser options.
  • \n
\n\n
Returns:
\n\n
\n

The resulting syntax tree collection.

\n
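\n\n

Example (a minimal usage sketch; one syntax tree is returned per statement):

\n\n
\n
import sqlglot\n\nfor tree in sqlglot.parse("SELECT 1; SELECT 2"):\n    print(tree.sql())\n
\n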
\n", "signature": "(\tsql: str,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parse_one": {"fullname": "sqlglot.parse_one", "modulename": "sqlglot", "qualname": "parse_one", "kind": "function", "doc": "

Parses the given SQL string and returns a syntax tree for the first parsed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • sql: the SQL code string to parse.
  • \n
  • read: the SQL dialect to apply during parsing (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • into: the SQLGlot Expression to parse into.
  • \n
  • **opts: other sqlglot.parser.Parser options.
  • \n
\n\n
Returns:
\n\n
\n

The syntax tree for the first parsed statement.

\n
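\n\n

Example (a minimal usage sketch, using the into option with exp.Select as the target expression type):

\n\n
\n
import sqlglot\nfrom sqlglot import exp\n\n# parse the first statement and require it to be a SELECT expression\nselect = sqlglot.parse_one("SELECT a FROM t", into=exp.Select)\nprint(select.sql())\n
\n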
\n", "signature": "(\tsql: str,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tinto: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]], NoneType] = None,\t**opts) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transpile": {"fullname": "sqlglot.transpile", "modulename": "sqlglot", "qualname": "transpile", "kind": "function", "doc": "

Parses the given SQL string in accordance with the source dialect and returns a list of SQL strings transformed\nto conform to the target dialect. Each string in the returned list represents a single transformed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • sql: the SQL code string to transpile.
  • \n
  • read: the source dialect used to parse the input string (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • write: the target dialect into which the input should be transformed (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • identity: if set to True and the target dialect is not specified, the source dialect will be used as both\nthe source and the target dialect.
  • \n
  • error_level: the desired error level of the parser.
  • \n
  • **opts: other sqlglot.generator.Generator options.
  • \n
\n\n
Returns:
\n\n
\n

The list of transpiled SQL statements.

\n
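\n\n

Example (a minimal usage sketch of the identity behavior: with no write dialect, the SQL is re-generated in the source dialect):

\n\n
\n
import sqlglot\n\n# identity=True is the default, so omitting write keeps the DuckDB dialect\nprint(sqlglot.transpile("SELECT STRFTIME(x, '%y')", read="duckdb")[0])\n
\n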
\n", "signature": "(\tsql: str,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\twrite: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tidentity: bool = True,\terror_level: Optional[sqlglot.errors.ErrorLevel] = None,\t**opts) -> List[str]:", "funcdef": "def"}, "sqlglot.dataframe": {"fullname": "sqlglot.dataframe", "modulename": "sqlglot.dataframe", "kind": "module", "doc": "

PySpark DataFrame SQL Generator

\n\n

This is a drop-in replacement for the PySpark DataFrame API that will generate SQL instead of executing DataFrame operations directly. This, when combined with the transpiling support in SQLGlot, allows one to write PySpark DataFrame code and execute it on other engines like DuckDB, Presto, Spark, Snowflake, and BigQuery.

\n\n

Currently, many of the common operations are covered, and more functionality will be added over time. Please open an issue or PR with your feedback or contribution to help influence what should be prioritized next and to make sure your use case is properly supported.

\n\n

How to use

\n\n

Instructions

\n\n
    \n
  • Installing SQLGlot is all that is required to generate SQL. The examples also show executing that generated SQL on a specific engine, which requires that engine's client library.
  • \n
  • Find/replace all from pyspark.sql with from sqlglot.dataframe.
  • \n
  • Prior to any spark.read.table or spark.table call, run sqlglot.schema.add_table('<table_name>', <column_structure>).\n
      \n
    • The column structure can be defined the following ways:\n
        \n
      • Dictionary where the keys are column names and the values are strings of the Spark SQL type name.\n
          \n
        • Ex: {'cola': 'string', 'colb': 'int'}
        • \n
      • \n
      • PySpark DataFrame StructType similar to when using createDataFrame.\n
          \n
        • Ex: StructType([StructField('cola', StringType()), StructField('colb', IntegerType())])
        • \n
      • \n
      • A string of names and types similar to what is supported in createDataFrame.\n
          \n
        • Ex: cola: STRING, colb: INT
        • \n
      • \n
      • [Not Recommended] A list of string column names without types.\n
          \n
        • Ex: ['cola', 'colb']
        • \n
        • The lack of types may limit functionality in future releases.
        • \n
      • \n
    • \n
    • See Registering Custom Schema Class for information on how to skip this step if the information is stored externally.
    • \n
  • \n
  • Add .sql(pretty=True) to your final DataFrame command to return a list of SQL statements to run that command.\n
      \n
    • In most cases a single SQL statement is returned. Currently the only exception is when caching DataFrames, which isn't supported in other dialects.
    • \n
    • Spark is the default output dialect. See dialects for a full list of dialects.
    • \n
    • Ex: .sql(pretty=True, dialect='bigquery')
    • \n
  • \n
\n\n

Examples

\n\n
\n
import sqlglot\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import functions as F\n\nsqlglot.schema.add_table('employee', {\n  'employee_id': 'INT',\n  'fname': 'STRING',\n  'lname': 'STRING',\n  'age': 'INT',\n})  # Register the table structure prior to reading from the table\n\nspark = SparkSession()\n\ndf = (\n    spark\n    .table('employee')\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees")) \n)\n\nprint(df.sql(pretty=True))  # Spark will be the dialect used by default\n
\n
\n\n
SELECT\n  `employee`.`age` AS `age`,\n  COUNT(DISTINCT `employee`.`employee_id`) AS `num_employees`\nFROM `employee` AS `employee`\nGROUP BY\n  `employee`.`age`\n
\n\n

Registering Custom Schema Class

\n\n

The step of adding sqlglot.schema.add_table can be skipped if you have the column structure stored externally, such as in a file or an external metadata table. This can be done by writing a class that implements the sqlglot.schema.Schema abstract class and then assigning that class to sqlglot.schema.

\n\n
\n
import sqlglot\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import functions as F\nfrom sqlglot.schema import Schema\n\n\nclass ExternalSchema(Schema):\n  ...\n\nsqlglot.schema = ExternalSchema()\n\nspark = SparkSession()\n\ndf = (\n    spark\n    .table('employee')\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees")) \n)\n\nprint(df.sql(pretty=True))\n
\n
\n\n

Example Implementations

\n\n

BigQuery

\n\n
\n
from google.cloud import bigquery\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import types\nfrom sqlglot.dataframe.sql import functions as F\n\nclient = bigquery.Client()\n\ndata = [\n    (1, "Jack", "Shephard", 34),\n    (2, "John", "Locke", 48),\n    (3, "Kate", "Austen", 34),\n    (4, "Claire", "Littleton", 22),\n    (5, "Hugo", "Reyes", 26),\n]\nschema = types.StructType([\n    types.StructField('employee_id', types.IntegerType(), False),\n    types.StructField('fname', types.StringType(), False),\n    types.StructField('lname', types.StringType(), False),\n    types.StructField('age', types.IntegerType(), False),\n])\n\nsql_statements = (\n    SparkSession()\n    .createDataFrame(data, schema)\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees"))\n    .sql(dialect="bigquery")\n)\n\nresult = None\nfor sql in sql_statements:\n    result = client.query(sql)\n\nassert result is not None\nfor row in result.result():\n    print(f"Age: {row['age']}, Num Employees: {row['num_employees']}")\n
\n
\n\n

Snowflake

\n\n
\n
import os\n\nimport snowflake.connector\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import types\nfrom sqlglot.dataframe.sql import functions as F\n\nctx = snowflake.connector.connect(\n    user=os.environ["SNOWFLAKE_USER"],\n    password=os.environ["SNOWFLAKE_PASS"],\n    account=os.environ["SNOWFLAKE_ACCOUNT"]\n)\ncs = ctx.cursor()\n\ndata = [\n    (1, "Jack", "Shephard", 34),\n    (2, "John", "Locke", 48),\n    (3, "Kate", "Austen", 34),\n    (4, "Claire", "Littleton", 22),\n    (5, "Hugo", "Reyes", 26),\n]\nschema = types.StructType([\n    types.StructField('employee_id', types.IntegerType(), False),\n    types.StructField('fname', types.StringType(), False),\n    types.StructField('lname', types.StringType(), False),\n    types.StructField('age', types.IntegerType(), False),\n])\n\nsql_statements = (\n    SparkSession()\n    .createDataFrame(data, schema)\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("lname")).alias("num_employees"))\n    .sql(dialect="snowflake")\n)\n\ntry:\n    for sql in sql_statements:\n        cs.execute(sql)\n    results = cs.fetchall()\n    for row in results:\n        print(f"Age: {row[0]}, Num Employees: {row[1]}")\nfinally:\n    cs.close()\nctx.close()\n
\n
\n\n

Spark

\n\n
\n
from pyspark.sql.session import SparkSession as PySparkSession\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import types\nfrom sqlglot.dataframe.sql import functions as F\n\ndata = [\n    (1, "Jack", "Shephard", 34),\n    (2, "John", "Locke", 48),\n    (3, "Kate", "Austen", 34),\n    (4, "Claire", "Littleton", 22),\n    (5, "Hugo", "Reyes", 26),\n]\nschema = types.StructType([\n    types.StructField('employee_id', types.IntegerType(), False),\n    types.StructField('fname', types.StringType(), False),\n    types.StructField('lname', types.StringType(), False),\n    types.StructField('age', types.IntegerType(), False),\n])\n\nsql_statements = (\n    SparkSession()\n    .createDataFrame(data, schema)\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees"))\n    .sql(dialect="spark")\n)\n\npyspark = PySparkSession.builder.master("local[*]").getOrCreate()\n\ndf = None\nfor sql in sql_statements:\n    df = pyspark.sql(sql)\n\nassert df is not None\ndf.show()\n
\n
\n\n

Unsupportable Operations

\n\n

Any operation that cannot be represented in SQL cannot be supported by this tool. An example of this would be RDD operations. Since the DataFrame API is mostly modeled around SQL concepts, though, most operations can be supported.

\n"}, "sqlglot.dataframe.sql": {"fullname": "sqlglot.dataframe.sql", "modulename": "sqlglot.dataframe.sql", "kind": "module", "doc": "

\n"}, "sqlglot.dataframe.sql.SparkSession": {"fullname": "sqlglot.dataframe.sql.SparkSession", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.SparkSession.table": {"fullname": "sqlglot.dataframe.sql.SparkSession.table", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession.table", "kind": "function", "doc": "

\n", "signature": "(self, tableName: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"fullname": "sqlglot.dataframe.sql.SparkSession.createDataFrame", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession.createDataFrame", "kind": "function", "doc": "

\n", "signature": "(\tself,\tdata: Sequence[Union[Dict[str, <MagicMock id='140043311982688'>], List[<MagicMock id='140043311982688'>], Tuple]],\tschema: Optional[<MagicMock id='140043311598608'>] = None,\tsamplingRatio: Optional[float] = None,\tverifySchema: bool = False) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.SparkSession.sql": {"fullname": "sqlglot.dataframe.sql.SparkSession.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession.sql", "kind": "function", "doc": "

\n", "signature": "(self, sqlQuery: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame": {"fullname": "sqlglot.dataframe.sql.DataFrame", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrame.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrame.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.__init__", "kind": "function", "doc": "

\n", "signature": "(\tspark: <MagicMock id='140043314786224'>,\texpression: sqlglot.expressions.Select,\tbranch_id: Optional[str] = None,\tsequence_id: Optional[str] = None,\tlast_op: sqlglot.dataframe.sql.operations.Operation = <Operation.INIT: -1>,\tpending_hints: Optional[List[sqlglot.expressions.Expression]] = None,\toutput_expression_container: Optional[<MagicMock id='140043314972080'>] = None,\t**kwargs)"}, "sqlglot.dataframe.sql.DataFrame.sql": {"fullname": "sqlglot.dataframe.sql.DataFrame.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.sql", "kind": "function", "doc": "

\n", "signature": "(self, dialect='spark', optimize=True, **kwargs) -> List[str]:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.copy": {"fullname": "sqlglot.dataframe.sql.DataFrame.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.copy", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.select": {"fullname": "sqlglot.dataframe.sql.DataFrame.select", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.select", "kind": "function", "doc": "

\n", "signature": "(self, *cols, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.alias": {"fullname": "sqlglot.dataframe.sql.DataFrame.alias", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.alias", "kind": "function", "doc": "

\n", "signature": "(self, name: str, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.where": {"fullname": "sqlglot.dataframe.sql.DataFrame.where", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.where", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcolumn: Union[sqlglot.dataframe.sql.column.Column, bool],\t**kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.filter": {"fullname": "sqlglot.dataframe.sql.DataFrame.filter", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.filter", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcolumn: Union[sqlglot.dataframe.sql.column.Column, bool],\t**kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"fullname": "sqlglot.dataframe.sql.DataFrame.groupBy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.groupBy", "kind": "function", "doc": "

\n", "signature": "(self, *cols, **kwargs) -> sqlglot.dataframe.sql.group.GroupedData:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.agg": {"fullname": "sqlglot.dataframe.sql.DataFrame.agg", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.agg", "kind": "function", "doc": "

\n", "signature": "(self, *exprs, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.join": {"fullname": "sqlglot.dataframe.sql.DataFrame.join", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.join", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother_df: sqlglot.dataframe.sql.dataframe.DataFrame,\ton: Union[str, List[str], sqlglot.dataframe.sql.column.Column, List[sqlglot.dataframe.sql.column.Column]],\thow: str = 'inner',\t**kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"fullname": "sqlglot.dataframe.sql.DataFrame.orderBy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.orderBy", "kind": "function", "doc": "

This implementation lets any ordered columns take priority over whatever is provided in ascending. Spark\nhas irregular behavior here and can produce runtime errors. Users shouldn't be mixing the two anyway, so this\nis unlikely to come up.

\n", "signature": "(\tself,\t*cols: Union[str, sqlglot.dataframe.sql.column.Column],\tascending: Union[Any, List[Any], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.sort": {"fullname": "sqlglot.dataframe.sql.DataFrame.sort", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.sort", "kind": "function", "doc": "

This implementation lets any ordered columns take priority over whatever is provided in ascending. Spark\nhas irregular behavior here and can produce runtime errors. Users shouldn't be mixing the two anyway, so this\nis unlikely to come up.

\n", "signature": "(\tself,\t*cols: Union[str, sqlglot.dataframe.sql.column.Column],\tascending: Union[Any, List[Any], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.union": {"fullname": "sqlglot.dataframe.sql.DataFrame.union", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.union", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"fullname": "sqlglot.dataframe.sql.DataFrame.unionAll", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.unionAll", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"fullname": "sqlglot.dataframe.sql.DataFrame.unionByName", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.unionByName", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame,\tallowMissingColumns: bool = False):", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.intersect": {"fullname": "sqlglot.dataframe.sql.DataFrame.intersect", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.intersect", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"fullname": "sqlglot.dataframe.sql.DataFrame.intersectAll", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.intersectAll", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"fullname": "sqlglot.dataframe.sql.DataFrame.exceptAll", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.exceptAll", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.distinct": {"fullname": "sqlglot.dataframe.sql.DataFrame.distinct", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.distinct", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"fullname": "sqlglot.dataframe.sql.DataFrame.dropDuplicates", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.dropDuplicates", "kind": "function", "doc": "

\n", "signature": "(self, subset: Optional[List[str]] = None):", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.dropna": {"fullname": "sqlglot.dataframe.sql.DataFrame.dropna", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.dropna", "kind": "function", "doc": "

\n", "signature": "(\tself,\thow: str = 'any',\tthresh: Optional[int] = None,\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.fillna": {"fullname": "sqlglot.dataframe.sql.DataFrame.fillna", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.fillna", "kind": "function", "doc": "

Functionality Difference: If you provide a value to replace a null and that type conflicts\nwith the type of the column, then PySpark will simply ignore your replacement.\nThis implementation will try to cast them to the same type in some cases, so the two won't always match.\nIt is best not to mix types: make sure the replacement value is the same type as the column.

\n\n

Possibility for improvement: Use the typeof function to get the type of the column\nand check whether it matches the type of the value provided. If not, make it null.
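A brief illustrative sketch (assuming the 'employee' table registered in the module examples; column names come from that schema): keep the replacement value the same type as the column being filled.

from sqlglot.dataframe.sql.session import SparkSession

spark = SparkSession()
df = spark.table('employee')
df = df.fillna(0, subset=['age'])             # integer column, integer replacement
df = df.fillna('unknown', subset=['fname'])   # string column, string replacement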

\n", "signature": "(\tself,\tvalue: <MagicMock id='140043310407712'>,\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.replace": {"fullname": "sqlglot.dataframe.sql.DataFrame.replace", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.replace", "kind": "function", "doc": "

\n", "signature": "(\tself,\tto_replace: Union[bool, int, float, str, List, Dict],\tvalue: Union[bool, int, float, str, List, NoneType] = None,\tsubset: Union[Collection[<MagicMock id='140043310703920'>], <MagicMock id='140043310703920'>, NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"fullname": "sqlglot.dataframe.sql.DataFrame.withColumn", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.withColumn", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcolName: str,\tcol: sqlglot.dataframe.sql.column.Column) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"fullname": "sqlglot.dataframe.sql.DataFrame.withColumnRenamed", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.withColumnRenamed", "kind": "function", "doc": "

\n", "signature": "(self, existing: str, new: str):", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.drop": {"fullname": "sqlglot.dataframe.sql.DataFrame.drop", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.drop", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[str, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.limit": {"fullname": "sqlglot.dataframe.sql.DataFrame.limit", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.limit", "kind": "function", "doc": "

\n", "signature": "(self, num: int) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.hint": {"fullname": "sqlglot.dataframe.sql.DataFrame.hint", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.hint", "kind": "function", "doc": "

\n", "signature": "(\tself,\tname: str,\t*parameters: Union[str, int, NoneType]) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.repartition": {"fullname": "sqlglot.dataframe.sql.DataFrame.repartition", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.repartition", "kind": "function", "doc": "

\n", "signature": "(\tself,\tnumPartitions: Union[int, <MagicMock id='140043310895632'>],\t*cols: <MagicMock id='140043310935312'>) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"fullname": "sqlglot.dataframe.sql.DataFrame.coalesce", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.coalesce", "kind": "function", "doc": "

\n", "signature": "(self, numPartitions: int) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.cache": {"fullname": "sqlglot.dataframe.sql.DataFrame.cache", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.cache", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.persist": {"fullname": "sqlglot.dataframe.sql.DataFrame.persist", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.persist", "kind": "function", "doc": "

Storage Level Options: https://spark.apache.org/docs/3.0.0-preview/sql-ref-syntax-aux-cache-cache-table.html

\n", "signature": "(\tself,\tstorageLevel: str = 'MEMORY_AND_DISK_SER') -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData": {"fullname": "sqlglot.dataframe.sql.GroupedData", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.GroupedData.__init__": {"fullname": "sqlglot.dataframe.sql.GroupedData.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.__init__", "kind": "function", "doc": "

\n", "signature": "(\tdf: sqlglot.dataframe.sql.dataframe.DataFrame,\tgroup_by_cols: List[sqlglot.dataframe.sql.column.Column],\tlast_op: sqlglot.dataframe.sql.operations.Operation)"}, "sqlglot.dataframe.sql.GroupedData.agg": {"fullname": "sqlglot.dataframe.sql.GroupedData.agg", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.agg", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*exprs: Union[sqlglot.dataframe.sql.column.Column, Dict[str, str]]) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.count": {"fullname": "sqlglot.dataframe.sql.GroupedData.count", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.count", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.mean": {"fullname": "sqlglot.dataframe.sql.GroupedData.mean", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.mean", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.avg": {"fullname": "sqlglot.dataframe.sql.GroupedData.avg", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.avg", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.max": {"fullname": "sqlglot.dataframe.sql.GroupedData.max", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.max", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.min": {"fullname": "sqlglot.dataframe.sql.GroupedData.min", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.min", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.sum": {"fullname": "sqlglot.dataframe.sql.GroupedData.sum", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.sum", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.pivot": {"fullname": "sqlglot.dataframe.sql.GroupedData.pivot", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.pivot", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column": {"fullname": "sqlglot.dataframe.sql.Column", "modulename": "sqlglot.dataframe.sql", "qualname": "Column", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.Column.__init__": {"fullname": "sqlglot.dataframe.sql.Column.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.__init__", "kind": "function", "doc": "

\n", "signature": "(\texpression: Union[<MagicMock id='140043312805776'>, sqlglot.expressions.Expression, NoneType])"}, "sqlglot.dataframe.sql.Column.ensure_col": {"fullname": "sqlglot.dataframe.sql.Column.ensure_col", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ensure_col", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tvalue: Union[<MagicMock id='140043311007728'>, sqlglot.expressions.Expression, NoneType]):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.ensure_cols": {"fullname": "sqlglot.dataframe.sql.Column.ensure_cols", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ensure_cols", "kind": "function", "doc": "

\n", "signature": "(\tcls,\targs: List[Union[<MagicMock id='140043311137744'>, sqlglot.expressions.Expression]]) -> List[sqlglot.dataframe.sql.column.Column]:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"fullname": "sqlglot.dataframe.sql.Column.invoke_anonymous_function", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.invoke_anonymous_function", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tcolumn: Optional[<MagicMock id='140043310280000'>],\tfunc_name: str,\t*args: Optional[<MagicMock id='140043311061728'>]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"fullname": "sqlglot.dataframe.sql.Column.invoke_expression_over_column", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.invoke_expression_over_column", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tcolumn: Optional[<MagicMock id='140043309080992'>],\tcallable_expression: Callable,\t**kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.binary_op": {"fullname": "sqlglot.dataframe.sql.Column.binary_op", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.binary_op", "kind": "function", "doc": "

\n", "signature": "(\tself,\tklass: Callable,\tother: <MagicMock id='140043309221248'>,\t**kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"fullname": "sqlglot.dataframe.sql.Column.inverse_binary_op", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.inverse_binary_op", "kind": "function", "doc": "

\n", "signature": "(\tself,\tklass: Callable,\tother: <MagicMock id='140043309230512'>,\t**kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.unary_op": {"fullname": "sqlglot.dataframe.sql.Column.unary_op", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.unary_op", "kind": "function", "doc": "

\n", "signature": "(self, klass: Callable, **kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.ensure_literal": {"fullname": "sqlglot.dataframe.sql.Column.ensure_literal", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ensure_literal", "kind": "function", "doc": "

\n", "signature": "(cls, value) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.copy": {"fullname": "sqlglot.dataframe.sql.Column.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.copy", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.set_table_name": {"fullname": "sqlglot.dataframe.sql.Column.set_table_name", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.set_table_name", "kind": "function", "doc": "

\n", "signature": "(self, table_name: str, copy=False) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.sql": {"fullname": "sqlglot.dataframe.sql.Column.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.sql", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> str:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.alias": {"fullname": "sqlglot.dataframe.sql.Column.alias", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.alias", "kind": "function", "doc": "

\n", "signature": "(self, name: str) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.asc": {"fullname": "sqlglot.dataframe.sql.Column.asc", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.asc", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.desc": {"fullname": "sqlglot.dataframe.sql.Column.desc", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.desc", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"fullname": "sqlglot.dataframe.sql.Column.asc_nulls_first", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.asc_nulls_first", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"fullname": "sqlglot.dataframe.sql.Column.asc_nulls_last", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.asc_nulls_last", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"fullname": "sqlglot.dataframe.sql.Column.desc_nulls_first", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.desc_nulls_first", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"fullname": "sqlglot.dataframe.sql.Column.desc_nulls_last", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.desc_nulls_last", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.when": {"fullname": "sqlglot.dataframe.sql.Column.when", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.when", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcondition: sqlglot.dataframe.sql.column.Column,\tvalue: Any) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.otherwise": {"fullname": "sqlglot.dataframe.sql.Column.otherwise", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.otherwise", "kind": "function", "doc": "

\n", "signature": "(self, value: Any) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.isNull": {"fullname": "sqlglot.dataframe.sql.Column.isNull", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.isNull", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.isNotNull": {"fullname": "sqlglot.dataframe.sql.Column.isNotNull", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.isNotNull", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.cast": {"fullname": "sqlglot.dataframe.sql.Column.cast", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.cast", "kind": "function", "doc": "

Functionality Difference: PySpark cast accepts a DataType instance of the DataType class.\nSQLGlot doesn't currently replicate this class, so it only accepts a string.
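A brief illustrative sketch (the column name is hypothetical): pass the type as a string.

from sqlglot.dataframe.sql import functions as F

age_as_string = F.col('age').cast('string')  # type name given as a string, not a PySpark DataType instance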

\n", "signature": "(self, dataType: Union[str, sqlglot.dataframe.sql.types.DataType]):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.startswith": {"fullname": "sqlglot.dataframe.sql.Column.startswith", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.startswith", "kind": "function", "doc": "

\n", "signature": "(\tself,\tvalue: Union[str, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.endswith": {"fullname": "sqlglot.dataframe.sql.Column.endswith", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.endswith", "kind": "function", "doc": "

\n", "signature": "(\tself,\tvalue: Union[str, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.rlike": {"fullname": "sqlglot.dataframe.sql.Column.rlike", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.rlike", "kind": "function", "doc": "

\n", "signature": "(self, regexp: str) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.like": {"fullname": "sqlglot.dataframe.sql.Column.like", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.like", "kind": "function", "doc": "

\n", "signature": "(self, other: str):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.ilike": {"fullname": "sqlglot.dataframe.sql.Column.ilike", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ilike", "kind": "function", "doc": "

\n", "signature": "(self, other: str):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.substr": {"fullname": "sqlglot.dataframe.sql.Column.substr", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.substr", "kind": "function", "doc": "

\n", "signature": "(\tself,\tstartPos: Union[int, sqlglot.dataframe.sql.column.Column],\tlength: Union[int, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.isin": {"fullname": "sqlglot.dataframe.sql.Column.isin", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.isin", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[<MagicMock id='140043309453744'>, Iterable[<MagicMock id='140043309453744'>]]):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.between": {"fullname": "sqlglot.dataframe.sql.Column.between", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.between", "kind": "function", "doc": "

\n", "signature": "(\tself,\tlowerBound: <MagicMock id='140043309563968'>,\tupperBound: <MagicMock id='140043309589040'>) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.over": {"fullname": "sqlglot.dataframe.sql.Column.over", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.over", "kind": "function", "doc": "

\n", "signature": "(\tself,\twindow: <MagicMock id='140043309646896'>) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.__init__", "kind": "function", "doc": "

\n", "signature": "(df: sqlglot.dataframe.sql.dataframe.DataFrame)"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.drop", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.drop", "kind": "function", "doc": "

\n", "signature": "(\tself,\thow: str = 'any',\tthresh: Optional[int] = None,\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.fill", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.fill", "kind": "function", "doc": "

\n", "signature": "(\tself,\tvalue: Union[int, bool, float, str, Dict[str, Any]],\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.replace", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.replace", "kind": "function", "doc": "

\n", "signature": "(\tself,\tto_replace: Union[bool, int, float, str, List, Dict],\tvalue: Union[bool, int, float, str, List, NoneType] = None,\tsubset: Union[str, List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window": {"fullname": "sqlglot.dataframe.sql.Window", "modulename": "sqlglot.dataframe.sql", "qualname": "Window", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.Window.partitionBy": {"fullname": "sqlglot.dataframe.sql.Window.partitionBy", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.partitionBy", "kind": "function", "doc": "

\n", "signature": "(\tcls,\t*cols: Union[<MagicMock id='140043310053360'>, List[<MagicMock id='140043310053360'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window.orderBy": {"fullname": "sqlglot.dataframe.sql.Window.orderBy", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.orderBy", "kind": "function", "doc": "

\n", "signature": "(\tcls,\t*cols: Union[<MagicMock id='140043309956208'>, List[<MagicMock id='140043309956208'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window.rowsBetween": {"fullname": "sqlglot.dataframe.sql.Window.rowsBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.rowsBetween", "kind": "function", "doc": "

\n", "signature": "(cls, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window.rangeBetween": {"fullname": "sqlglot.dataframe.sql.Window.rangeBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.rangeBetween", "kind": "function", "doc": "

\n", "signature": "(cls, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec": {"fullname": "sqlglot.dataframe.sql.WindowSpec", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"fullname": "sqlglot.dataframe.sql.WindowSpec.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression = (WINDOW ))"}, "sqlglot.dataframe.sql.WindowSpec.copy": {"fullname": "sqlglot.dataframe.sql.WindowSpec.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.copy", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.sql": {"fullname": "sqlglot.dataframe.sql.WindowSpec.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.sql", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> str:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"fullname": "sqlglot.dataframe.sql.WindowSpec.partitionBy", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.partitionBy", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[<MagicMock id='140043309879920'>, List[<MagicMock id='140043309879920'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"fullname": "sqlglot.dataframe.sql.WindowSpec.orderBy", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.orderBy", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[<MagicMock id='140043309740432'>, List[<MagicMock id='140043309740432'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"fullname": "sqlglot.dataframe.sql.WindowSpec.rowsBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.rowsBetween", "kind": "function", "doc": "

\n", "signature": "(self, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"fullname": "sqlglot.dataframe.sql.WindowSpec.rangeBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.rangeBetween", "kind": "function", "doc": "

\n", "signature": "(self, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameReader": {"fullname": "sqlglot.dataframe.sql.DataFrameReader", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameReader", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrameReader.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameReader.__init__", "kind": "function", "doc": "

\n", "signature": "(spark: sqlglot.dataframe.sql.session.SparkSession)"}, "sqlglot.dataframe.sql.DataFrameReader.table": {"fullname": "sqlglot.dataframe.sql.DataFrameReader.table", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameReader.table", "kind": "function", "doc": "

\n", "signature": "(self, tableName: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.__init__", "kind": "function", "doc": "

\n", "signature": "(\tdf: sqlglot.dataframe.sql.dataframe.DataFrame,\tspark: Optional[sqlglot.dataframe.sql.session.SparkSession] = None,\tmode: Optional[str] = None,\tby_name: bool = False)"}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.copy", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> sqlglot.dataframe.sql.readwriter.DataFrameWriter:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.sql", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> List[str]:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.mode", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.mode", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsaveMode: Optional[str]) -> sqlglot.dataframe.sql.readwriter.DataFrameWriter:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.insertInto", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.insertInto", "kind": "function", "doc": "

\n", "signature": "(\tself,\ttableName: str,\toverwrite: Optional[bool] = None) -> sqlglot.dataframe.sql.readwriter.DataFrameWriter:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.saveAsTable", "kind": "function", "doc": "

\n", "signature": "(\tself,\tname: str,\tformat: Optional[str] = None,\tmode: Optional[str] = None):", "funcdef": "def"}, "sqlglot.dialects": {"fullname": "sqlglot.dialects", "modulename": "sqlglot.dialects", "kind": "module", "doc": "

Dialects

\n\n

While there is a SQL standard, most SQL engines support a variation of that standard. This makes it difficult\nto write portable SQL code. SQLGlot bridges all the different variations, called \"dialects\", with an extensible\nSQL transpilation framework.

\n\n

The base sqlglot.dialects.dialect.Dialect class implements a generic dialect that aims to be as universal as possible.

\n\n

Each SQL variation has its own Dialect subclass, extending the corresponding Tokenizer, Parser and Generator\nclasses as needed.

\n\n

Implementing a custom Dialect

\n\n

Consider the following example:

\n\n
\n
from sqlglot import exp\nfrom sqlglot.dialects.dialect import Dialect\nfrom sqlglot.generator import Generator\nfrom sqlglot.tokens import Tokenizer, TokenType\n\n\nclass Custom(Dialect):\n    class Tokenizer(Tokenizer):\n        QUOTES = ["'", '"']\n        IDENTIFIERS = ["`"]\n\n        KEYWORDS = {\n            **Tokenizer.KEYWORDS,\n            "INT64": TokenType.BIGINT,\n            "FLOAT64": TokenType.DOUBLE,\n        }\n\n    class Generator(Generator):\n        TRANSFORMS = {exp.Array: lambda self, e: f"[{self.expressions(e)}]"}\n\n        TYPE_MAPPING = {\n            exp.DataType.Type.TINYINT: "INT64",\n            exp.DataType.Type.SMALLINT: "INT64",\n            exp.DataType.Type.INT: "INT64",\n            exp.DataType.Type.BIGINT: "INT64",\n            exp.DataType.Type.DECIMAL: "NUMERIC",\n            exp.DataType.Type.FLOAT: "FLOAT64",\n            exp.DataType.Type.DOUBLE: "FLOAT64",\n            exp.DataType.Type.BOOLEAN: "BOOL",\n            exp.DataType.Type.TEXT: "STRING",\n        }\n
\n
\n\n

This is a typical example of adding a new dialect implementation in SQLGlot: we specify its identifier and string\ndelimiters, as well as what tokens it uses for its types and how they're associated with SQLGlot types. Since\nthe Expression classes are common for each dialect supported in SQLGlot, we may also need to override the generation\nlogic for some expressions; this is usually done by adding new entries to the TRANSFORMS mapping.

\n\n
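As a follow-up, a minimal sketch of using the dialect defined above (assuming the Custom class has been defined in the current session; Dialect subclasses are registered under their lowercased class name):

import sqlglot

# With the TYPE_MAPPING above, INT is expected to be rendered as INT64.
print(sqlglot.transpile("SELECT CAST(x AS INT) FROM t", write="custom")[0])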
\n"}, "sqlglot.dialects.bigquery": {"fullname": "sqlglot.dialects.bigquery", "modulename": "sqlglot.dialects.bigquery", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.bigquery.BigQuery": {"fullname": "sqlglot.dialects.bigquery.BigQuery", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Tokenizer", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Parser", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
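A brief sketch of passing these options when parsing (values are illustrative; parser options are forwarded through the **opts of sqlglot.parse_one):

import sqlglot
from sqlglot.errors import ErrorLevel

# Collect parse errors and raise a ParseError containing at most 5 messages.
expression = sqlglot.parse_one("SELECT 1", read="bigquery", error_level=ErrorLevel.RAISE, max_errors=5)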
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
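A brief sketch of passing some of these options when generating SQL (values are illustrative; generator options are forwarded through the **opts of sqlglot.transpile):

import sqlglot

# Pretty-print the output and quote all identifiers.
sql = sqlglot.transpile("SELECT a FROM tbl", write="bigquery", pretty=True, identify=True)[0]
print(sql)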
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.array_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Array) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.commit_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.rollback_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.in_unnest_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Unnest) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.except_op", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.except_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.intersect_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.with_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.dialects.clickhouse": {"fullname": "sqlglot.dialects.clickhouse", "modulename": "sqlglot.dialects.clickhouse", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.clickhouse.ClickHouse": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Parser", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.safeconcat_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SafeConcat) -> str:", "funcdef": "def"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.cte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CTE) -> str:", "funcdef": "def"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.after_limit_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> List[str]:", "funcdef": "def"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.parameterizedagg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Anonymous) -> str:", "funcdef": "def"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.placeholder_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Placeholder) -> str:", "funcdef": "def"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.oncluster_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.OnCluster) -> str:", "funcdef": "def"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.createable_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Create,\tlocations: dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]) -> str:", "funcdef": "def"}, "sqlglot.dialects.databricks": {"fullname": "sqlglot.dialects.databricks", "modulename": "sqlglot.dialects.databricks", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.databricks.Databricks": {"fullname": "sqlglot.dialects.databricks.Databricks", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.spark.Spark"}, "sqlglot.dialects.databricks.Databricks.Parser": {"fullname": "sqlglot.dialects.databricks.Databricks.Parser", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:

  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
\n", "bases": "sqlglot.dialects.spark.Spark.Parser"}, "sqlglot.dialects.databricks.Databricks.Generator": {"fullname": "sqlglot.dialects.databricks.Databricks.Generator", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:

  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
\n", "bases": "sqlglot.dialects.spark.Spark.Generator"}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"fullname": "sqlglot.dialects.databricks.Databricks.Tokenizer", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.spark2.Spark2.Tokenizer"}, "sqlglot.dialects.dialect": {"fullname": "sqlglot.dialects.dialect", "modulename": "sqlglot.dialects.dialect", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.dialect.Dialects": {"fullname": "sqlglot.dialects.dialect.Dialects", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects", "kind": "class", "doc": "

An enumeration.
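The members enumerated below are string-valued, so their values are the lowercase dialect names used throughout the API; a small sketch (query is illustrative):

    import sqlglot
    from sqlglot.dialects.dialect import Dialects

    # Each member's value is the lowercase dialect name.
    print(Dialects.DUCKDB.value)  # "duckdb"

    # The string values can be used as read/write targets in the top-level helpers.
    print(sqlglot.transpile("SELECT 1", read=Dialects.MYSQL.value, write=Dialects.POSTGRES.value)[0])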

\n", "bases": "builtins.str, enum.Enum"}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"fullname": "sqlglot.dialects.dialect.Dialects.DIALECT", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DIALECT", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.DIALECT: ''>"}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"fullname": "sqlglot.dialects.dialect.Dialects.BIGQUERY", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.BIGQUERY", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.BIGQUERY: 'bigquery'>"}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"fullname": "sqlglot.dialects.dialect.Dialects.CLICKHOUSE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.CLICKHOUSE", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.CLICKHOUSE: 'clickhouse'>"}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"fullname": "sqlglot.dialects.dialect.Dialects.DATABRICKS", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DATABRICKS", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.DATABRICKS: 'databricks'>"}, "sqlglot.dialects.dialect.Dialects.DRILL": {"fullname": "sqlglot.dialects.dialect.Dialects.DRILL", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DRILL", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.DRILL: 'drill'>"}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"fullname": "sqlglot.dialects.dialect.Dialects.DUCKDB", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DUCKDB", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.DUCKDB: 'duckdb'>"}, "sqlglot.dialects.dialect.Dialects.HIVE": {"fullname": "sqlglot.dialects.dialect.Dialects.HIVE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.HIVE", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.HIVE: 'hive'>"}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"fullname": "sqlglot.dialects.dialect.Dialects.MYSQL", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.MYSQL", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.MYSQL: 'mysql'>"}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"fullname": "sqlglot.dialects.dialect.Dialects.ORACLE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.ORACLE", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.ORACLE: 'oracle'>"}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"fullname": "sqlglot.dialects.dialect.Dialects.POSTGRES", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.POSTGRES", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.POSTGRES: 'postgres'>"}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"fullname": "sqlglot.dialects.dialect.Dialects.PRESTO", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.PRESTO", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.PRESTO: 'presto'>"}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"fullname": "sqlglot.dialects.dialect.Dialects.REDSHIFT", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.REDSHIFT", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.REDSHIFT: 'redshift'>"}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"fullname": "sqlglot.dialects.dialect.Dialects.SNOWFLAKE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SNOWFLAKE", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.SNOWFLAKE: 'snowflake'>"}, "sqlglot.dialects.dialect.Dialects.SPARK": {"fullname": "sqlglot.dialects.dialect.Dialects.SPARK", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SPARK", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.SPARK: 'spark'>"}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"fullname": "sqlglot.dialects.dialect.Dialects.SPARK2", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SPARK2", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.SPARK2: 'spark2'>"}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"fullname": "sqlglot.dialects.dialect.Dialects.SQLITE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SQLITE", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.SQLITE: 'sqlite'>"}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"fullname": "sqlglot.dialects.dialect.Dialects.STARROCKS", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.STARROCKS", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.STARROCKS: 'starrocks'>"}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"fullname": "sqlglot.dialects.dialect.Dialects.TABLEAU", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TABLEAU", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.TABLEAU: 'tableau'>"}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"fullname": "sqlglot.dialects.dialect.Dialects.TERADATA", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TERADATA", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.TERADATA: 'teradata'>"}, "sqlglot.dialects.dialect.Dialects.TRINO": {"fullname": "sqlglot.dialects.dialect.Dialects.TRINO", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TRINO", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.TRINO: 'trino'>"}, "sqlglot.dialects.dialect.Dialects.TSQL": {"fullname": "sqlglot.dialects.dialect.Dialects.TSQL", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TSQL", "kind": "variable", "doc": "

\n", "default_value": "<Dialects.TSQL: 'tsql'>"}, "sqlglot.dialects.dialect.Dialect": {"fullname": "sqlglot.dialects.dialect.Dialect", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect", "kind": "class", "doc": "

\n"}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"fullname": "sqlglot.dialects.dialect.Dialect.get_or_raise", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.get_or_raise", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType]) -> Type[sqlglot.dialects.dialect.Dialect]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.format_time": {"fullname": "sqlglot.dialects.dialect.Dialect.format_time", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.format_time", "kind": "function", "doc": "

\n", "signature": "(\tcls,\texpression: Union[str, sqlglot.expressions.Expression, NoneType]) -> Optional[sqlglot.expressions.Expression]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.parse": {"fullname": "sqlglot.dialects.dialect.Dialect.parse", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.parse", "kind": "function", "doc": "

\n", "signature": "(self, sql: str, **opts) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.parse_into": {"fullname": "sqlglot.dialects.dialect.Dialect.parse_into", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.parse_into", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression_type: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]]],\tsql: str,\t**opts) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.generate": {"fullname": "sqlglot.dialects.dialect.Dialect.generate", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.generate", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: Optional[sqlglot.expressions.Expression],\t**opts) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.transpile": {"fullname": "sqlglot.dialects.dialect.Dialect.transpile", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.transpile", "kind": "function", "doc": "

\n", "signature": "(self, sql: str, **opts) -> List[str]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.tokenize": {"fullname": "sqlglot.dialects.dialect.Dialect.tokenize", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.tokenize", "kind": "function", "doc": "

\n", "signature": "(self, sql: str) -> List[sqlglot.tokens.Token]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.parser": {"fullname": "sqlglot.dialects.dialect.Dialect.parser", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.parser", "kind": "function", "doc": "

\n", "signature": "(self, **opts) -> sqlglot.parser.Parser:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.generator": {"fullname": "sqlglot.dialects.dialect.Dialect.generator", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.generator", "kind": "function", "doc": "

\n", "signature": "(self, **opts) -> sqlglot.generator.Generator:", "funcdef": "def"}, "sqlglot.dialects.dialect.rename_func": {"fullname": "sqlglot.dialects.dialect.rename_func", "modulename": "sqlglot.dialects.dialect", "qualname": "rename_func", "kind": "function", "doc": "

\n", "signature": "(\tname: str) -> Callable[[sqlglot.generator.Generator, sqlglot.expressions.Expression], str]:", "funcdef": "def"}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"fullname": "sqlglot.dialects.dialect.approx_count_distinct_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "approx_count_distinct_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.ApproxDistinct) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.if_sql": {"fullname": "sqlglot.dialects.dialect.if_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "if_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.If) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"fullname": "sqlglot.dialects.dialect.arrow_json_extract_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "arrow_json_extract_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.JSONExtract | sqlglot.expressions.JSONBExtract) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"fullname": "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "arrow_json_extract_scalar_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.JSONExtractScalar | sqlglot.expressions.JSONBExtractScalar) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.inline_array_sql": {"fullname": "sqlglot.dialects.dialect.inline_array_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "inline_array_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Array) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_ilike_sql": {"fullname": "sqlglot.dialects.dialect.no_ilike_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_ilike_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.ILike) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"fullname": "sqlglot.dialects.dialect.no_paren_current_date_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_paren_current_date_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.CurrentDate) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"fullname": "sqlglot.dialects.dialect.no_recursive_cte_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_recursive_cte_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.With) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"fullname": "sqlglot.dialects.dialect.no_safe_divide_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_safe_divide_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.SafeDivide) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_tablesample_sql": {"fullname": "sqlglot.dialects.dialect.no_tablesample_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_tablesample_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TableSample) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_pivot_sql": {"fullname": "sqlglot.dialects.dialect.no_pivot_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_pivot_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Pivot) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_trycast_sql": {"fullname": "sqlglot.dialects.dialect.no_trycast_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_trycast_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TryCast) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_properties_sql": {"fullname": "sqlglot.dialects.dialect.no_properties_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_properties_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"fullname": "sqlglot.dialects.dialect.no_comment_column_constraint_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_comment_column_constraint_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.CommentColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.str_position_sql": {"fullname": "sqlglot.dialects.dialect.str_position_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "str_position_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.StrPosition) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.struct_extract_sql": {"fullname": "sqlglot.dialects.dialect.struct_extract_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "struct_extract_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.StructExtract) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.var_map_sql": {"fullname": "sqlglot.dialects.dialect.var_map_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "var_map_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Map | sqlglot.expressions.VarMap,\tmap_func_name: str = 'MAP') -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.format_time_lambda": {"fullname": "sqlglot.dialects.dialect.format_time_lambda", "modulename": "sqlglot.dialects.dialect", "qualname": "format_time_lambda", "kind": "function", "doc": "

Helper used for time expressions.

Arguments:

  • exp_class: the expression class to instantiate.
  • dialect: target sql dialect.
  • default: the default format, True being time.

Returns:

  A callable that can be used to return the appropriately formatted time expression.
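A sketch of how the returned callback behaves, using assumed arguments (a STRPTIME-style value/format pair and the duckdb dialect, chosen only for illustration):

    from sqlglot import exp
    from sqlglot.dialects.dialect import format_time_lambda

    # Build a parser callback that turns f(value, fmt) argument lists into
    # exp.StrToTime nodes, translating the format string via the dialect's time mapping.
    parse_str_to_time = format_time_lambda(exp.StrToTime, "duckdb")

    node = parse_str_to_time([exp.column("ts_text"), exp.Literal.string("%Y-%m-%d")])
    print(type(node).__name__)  # StrToTime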
\n", "signature": "(\texp_class: Type[~E],\tdialect: str,\tdefault: Union[str, bool, NoneType] = None) -> Callable[[List], ~E]:", "funcdef": "def"}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"fullname": "sqlglot.dialects.dialect.create_with_partitions_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "create_with_partitions_sql", "kind": "function", "doc": "

In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the PARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding columns are removed from the create statement.
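For context only, a quick illustration of this behavior; the DDL is illustrative and the exact output depends on the dialect's schema handling:

    import sqlglot

    # Round-trip a partitioned CREATE TABLE through Spark, whose generator applies
    # the partition handling described above.
    ddl = "CREATE TABLE t (a INT) PARTITIONED BY (dt STRING)"
    print(sqlglot.transpile(ddl, read="spark", write="spark")[0])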

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Create) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.parse_date_delta": {"fullname": "sqlglot.dialects.dialect.parse_date_delta", "modulename": "sqlglot.dialects.dialect", "qualname": "parse_date_delta", "kind": "function", "doc": "

\n", "signature": "(\texp_class: Type[~E],\tunit_mapping: Optional[Dict[str, str]] = None) -> Callable[[List], ~E]:", "funcdef": "def"}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"fullname": "sqlglot.dialects.dialect.parse_date_delta_with_interval", "modulename": "sqlglot.dialects.dialect", "qualname": "parse_date_delta_with_interval", "kind": "function", "doc": "

\n", "signature": "(expression_class: Type[~E]) -> Callable[[List], Optional[~E]]:", "funcdef": "def"}, "sqlglot.dialects.dialect.date_trunc_to_time": {"fullname": "sqlglot.dialects.dialect.date_trunc_to_time", "modulename": "sqlglot.dialects.dialect", "qualname": "date_trunc_to_time", "kind": "function", "doc": "

\n", "signature": "(\targs: List) -> sqlglot.expressions.DateTrunc | sqlglot.expressions.TimestampTrunc:", "funcdef": "def"}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"fullname": "sqlglot.dialects.dialect.timestamptrunc_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "timestamptrunc_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TimestampTrunc) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.locate_to_strposition": {"fullname": "sqlglot.dialects.dialect.locate_to_strposition", "modulename": "sqlglot.dialects.dialect", "qualname": "locate_to_strposition", "kind": "function", "doc": "

\n", "signature": "(args: List) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"fullname": "sqlglot.dialects.dialect.strposition_to_locate_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "strposition_to_locate_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.StrPosition) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.left_to_substring_sql": {"fullname": "sqlglot.dialects.dialect.left_to_substring_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "left_to_substring_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Left) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.right_to_substring_sql": {"fullname": "sqlglot.dialects.dialect.right_to_substring_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "right_to_substring_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Left) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.timestrtotime_sql": {"fullname": "sqlglot.dialects.dialect.timestrtotime_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "timestrtotime_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TimeStrToTime) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.datestrtodate_sql": {"fullname": "sqlglot.dialects.dialect.datestrtodate_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "datestrtodate_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.DateStrToDate) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.min_or_least": {"fullname": "sqlglot.dialects.dialect.min_or_least", "modulename": "sqlglot.dialects.dialect", "qualname": "min_or_least", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Min) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.max_or_greatest": {"fullname": "sqlglot.dialects.dialect.max_or_greatest", "modulename": "sqlglot.dialects.dialect", "qualname": "max_or_greatest", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Max) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.count_if_to_sum": {"fullname": "sqlglot.dialects.dialect.count_if_to_sum", "modulename": "sqlglot.dialects.dialect", "qualname": "count_if_to_sum", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.CountIf) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.trim_sql": {"fullname": "sqlglot.dialects.dialect.trim_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "trim_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Trim) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.str_to_time_sql": {"fullname": "sqlglot.dialects.dialect.str_to_time_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "str_to_time_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Expression) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"fullname": "sqlglot.dialects.dialect.ts_or_ds_to_date_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "ts_or_ds_to_date_sql", "kind": "function", "doc": "

\n", "signature": "(dialect: str) -> Callable:", "funcdef": "def"}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"fullname": "sqlglot.dialects.dialect.concat_to_dpipe_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "concat_to_dpipe_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Concat | sqlglot.expressions.SafeConcat) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.pivot_column_names": {"fullname": "sqlglot.dialects.dialect.pivot_column_names", "modulename": "sqlglot.dialects.dialect", "qualname": "pivot_column_names", "kind": "function", "doc": "

\n", "signature": "(\taggregations: List[sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType]) -> List[str]:", "funcdef": "def"}, "sqlglot.dialects.drill": {"fullname": "sqlglot.dialects.drill", "modulename": "sqlglot.dialects.drill", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.drill.Drill": {"fullname": "sqlglot.dialects.drill.Drill", "modulename": "sqlglot.dialects.drill", "qualname": "Drill", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.drill.Drill.Tokenizer": {"fullname": "sqlglot.dialects.drill.Drill.Tokenizer", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.drill.Drill.Parser": {"fullname": "sqlglot.dialects.drill.Drill.Parser", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:

  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.drill.Drill.Generator": {"fullname": "sqlglot.dialects.drill.Drill.Generator", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:

  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"fullname": "sqlglot.dialects.drill.Drill.Generator.normalize_func", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Generator.normalize_func", "kind": "function", "doc": "

\n", "signature": "(self, name: str) -> str:", "funcdef": "def"}, "sqlglot.dialects.duckdb": {"fullname": "sqlglot.dialects.duckdb", "modulename": "sqlglot.dialects.duckdb", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.duckdb.DuckDB": {"fullname": "sqlglot.dialects.duckdb.DuckDB", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Tokenizer", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Parser", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:

  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Generator", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:

  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Generator.interval_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Interval) -> str:", "funcdef": "def"}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Generator.tablesample_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.TableSample,\tseed_prefix: str = 'SEED',\tsep: str = ' AS ') -> str:", "funcdef": "def"}, "sqlglot.dialects.hive": {"fullname": "sqlglot.dialects.hive", "modulename": "sqlglot.dialects.hive", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.hive.Hive": {"fullname": "sqlglot.dialects.hive.Hive", "modulename": "sqlglot.dialects.hive", "qualname": "Hive", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.hive.Hive.Tokenizer": {"fullname": "sqlglot.dialects.hive.Hive.Tokenizer", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.hive.Hive.Parser": {"fullname": "sqlglot.dialects.hive.Hive.Parser", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:

  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.hive.Hive.Generator": {"fullname": "sqlglot.dialects.hive.Hive.Generator", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:

  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"fullname": "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator.arrayagg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ArrayAgg) -> str:", "funcdef": "def"}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"fullname": "sqlglot.dialects.hive.Hive.Generator.with_properties", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator.with_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"fullname": "sqlglot.dialects.hive.Hive.Generator.datatype_sql", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator.datatype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataType) -> str:", "funcdef": "def"}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"fullname": "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator.after_having_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> List[str]:", "funcdef": "def"}, "sqlglot.dialects.mysql": {"fullname": "sqlglot.dialects.mysql", "modulename": "sqlglot.dialects.mysql", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.mysql.MySQL": {"fullname": "sqlglot.dialects.mysql.MySQL", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"fullname": "sqlglot.dialects.mysql.MySQL.Tokenizer", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.mysql.MySQL.Parser": {"fullname": "sqlglot.dialects.mysql.MySQL.Parser", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:

  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.mysql.MySQL.Generator": {"fullname": "sqlglot.dialects.mysql.MySQL.Generator", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:

  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"fullname": "sqlglot.dialects.mysql.MySQL.Generator.show_sql", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Generator.show_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Show) -> str:", "funcdef": "def"}, "sqlglot.dialects.oracle": {"fullname": "sqlglot.dialects.oracle", "modulename": "sqlglot.dialects.oracle", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.oracle.Oracle": {"fullname": "sqlglot.dialects.oracle.Oracle", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.oracle.Oracle.Parser": {"fullname": "sqlglot.dialects.oracle.Oracle.Parser", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:

  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.oracle.Oracle.Generator": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:

  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator.offset_sql", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator.offset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Offset) -> str:", "funcdef": "def"}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator.column_sql", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator.column_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Column) -> str:", "funcdef": "def"}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator.xmltable_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.XMLTable) -> str:", "funcdef": "def"}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"fullname": "sqlglot.dialects.oracle.Oracle.Tokenizer", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.postgres": {"fullname": "sqlglot.dialects.postgres", "modulename": "sqlglot.dialects.postgres", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.postgres.Postgres": {"fullname": "sqlglot.dialects.postgres.Postgres", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"fullname": "sqlglot.dialects.postgres.Postgres.Tokenizer", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.postgres.Postgres.Parser": {"fullname": "sqlglot.dialects.postgres.Postgres.Parser", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:

  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.postgres.Postgres.Generator": {"fullname": "sqlglot.dialects.postgres.Postgres.Generator", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:

  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.presto": {"fullname": "sqlglot.dialects.presto", "modulename": "sqlglot.dialects.presto", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.presto.Presto": {"fullname": "sqlglot.dialects.presto.Presto", "modulename": "sqlglot.dialects.presto", "qualname": "Presto", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.presto.Presto.Tokenizer": {"fullname": "sqlglot.dialects.presto.Presto.Tokenizer", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.presto.Presto.Parser": {"fullname": "sqlglot.dialects.presto.Presto.Parser", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:

  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.presto.Presto.Generator": {"fullname": "sqlglot.dialects.presto.Presto.Generator", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:

  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"fullname": "sqlglot.dialects.presto.Presto.Generator.interval_sql", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator.interval_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Interval) -> str:", "funcdef": "def"}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"fullname": "sqlglot.dialects.presto.Presto.Generator.transaction_sql", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Transaction) -> str:", "funcdef": "def"}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"fullname": "sqlglot.dialects.presto.Presto.Generator.generateseries_sql", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator.generateseries_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.GenerateSeries) -> str:", "funcdef": "def"}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"fullname": "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator.offset_limit_modifiers", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Expression,\tfetch: bool,\tlimit: Union[sqlglot.expressions.Fetch, sqlglot.expressions.Limit, NoneType]) -> List[str]:", "funcdef": "def"}, "sqlglot.dialects.redshift": {"fullname": "sqlglot.dialects.redshift", "modulename": "sqlglot.dialects.redshift", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.redshift.Redshift": {"fullname": "sqlglot.dialects.redshift.Redshift", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.postgres.Postgres"}, "sqlglot.dialects.redshift.Redshift.Parser": {"fullname": "sqlglot.dialects.redshift.Redshift.Parser", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n", "bases": "sqlglot.dialects.postgres.Postgres.Parser"}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"fullname": "sqlglot.dialects.redshift.Redshift.Tokenizer", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.postgres.Postgres.Tokenizer"}, "sqlglot.dialects.redshift.Redshift.Generator": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.postgres.Postgres.Generator"}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator.values_sql", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator.values_sql", "kind": "function", "doc": "

Converts VALUES... expression into a series of unions.

\n\n

Note: If you have a lot of unions then this will result in a large number of recursive statements to\nevaluate the expression. You may need to increase sys.setrecursionlimit to run and it can also be\nvery slow.

\n", "signature": "(self, expression: sqlglot.expressions.Values) -> str:", "funcdef": "def"}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator.with_properties", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator.with_properties", "kind": "function", "doc": "

Redshift doesn't have WITH as part of their with_properties so we remove it

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator.datatype_sql", "kind": "function", "doc": "

Redshift converts the TEXT data type to VARCHAR(255) by default when people more generally mean\nVARCHAR of max length which is VARCHAR(max) in Redshift. Therefore if we get a TEXT data type\nwithout precision we convert it to VARCHAR(max) and if it does have precision then we just convert\nTEXT to VARCHAR.

\n", "signature": "(self, expression: sqlglot.expressions.DataType) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake": {"fullname": "sqlglot.dialects.snowflake", "modulename": "sqlglot.dialects.snowflake", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.snowflake.Snowflake": {"fullname": "sqlglot.dialects.snowflake.Snowflake", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Parser", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Tokenizer", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.except_op", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.except_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.intersect_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.settag_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SetTag) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.describe_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Describe) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.generatedasidentitycolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.GeneratedAsIdentityColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.dialects.spark": {"fullname": "sqlglot.dialects.spark", "modulename": "sqlglot.dialects.spark", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.spark.Spark": {"fullname": "sqlglot.dialects.spark.Spark", "modulename": "sqlglot.dialects.spark", "qualname": "Spark", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.spark2.Spark2"}, "sqlglot.dialects.spark.Spark.Parser": {"fullname": "sqlglot.dialects.spark.Spark.Parser", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n", "bases": "sqlglot.dialects.spark2.Spark2.Parser"}, "sqlglot.dialects.spark.Spark.Generator": {"fullname": "sqlglot.dialects.spark.Spark.Generator", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.spark2.Spark2.Generator"}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"fullname": "sqlglot.dialects.spark.Spark.Generator.datediff_sql", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.Generator.datediff_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DateDiff) -> str:", "funcdef": "def"}, "sqlglot.dialects.spark2": {"fullname": "sqlglot.dialects.spark2", "modulename": "sqlglot.dialects.spark2", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.spark2.Spark2": {"fullname": "sqlglot.dialects.spark2.Spark2", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.hive.Hive"}, "sqlglot.dialects.spark2.Spark2.Parser": {"fullname": "sqlglot.dialects.spark2.Spark2.Parser", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n", "bases": "sqlglot.dialects.hive.Hive.Parser"}, "sqlglot.dialects.spark2.Spark2.Generator": {"fullname": "sqlglot.dialects.spark2.Spark2.Generator", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.hive.Hive.Generator"}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"fullname": "sqlglot.dialects.spark2.Spark2.Generator.cast_sql", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2.Generator.cast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cast) -> str:", "funcdef": "def"}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"fullname": "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2.Generator.columndef_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ColumnDef, sep: str = ' ') -> str:", "funcdef": "def"}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"fullname": "sqlglot.dialects.spark2.Spark2.Tokenizer", "modulename": "sqlglot.dialects.spark2", "qualname": "Spark2.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.hive.Hive.Tokenizer"}, "sqlglot.dialects.sqlite": {"fullname": "sqlglot.dialects.sqlite", "modulename": "sqlglot.dialects.sqlite", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.sqlite.SQLite": {"fullname": "sqlglot.dialects.sqlite.SQLite", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"fullname": "sqlglot.dialects.sqlite.SQLite.Tokenizer", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.sqlite.SQLite.Parser": {"fullname": "sqlglot.dialects.sqlite.SQLite.Parser", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.sqlite.SQLite.Generator": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.cast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cast) -> str:", "funcdef": "def"}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.datediff_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DateDiff) -> str:", "funcdef": "def"}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.groupconcat_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.GroupConcat) -> str:", "funcdef": "def"}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.least_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.least_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Least) -> str:", "funcdef": "def"}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Transaction) -> str:", "funcdef": "def"}, "sqlglot.dialects.starrocks": {"fullname": "sqlglot.dialects.starrocks", "modulename": "sqlglot.dialects.starrocks", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.starrocks.StarRocks": {"fullname": "sqlglot.dialects.starrocks.StarRocks", "modulename": "sqlglot.dialects.starrocks", "qualname": "StarRocks", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.mysql.MySQL"}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"fullname": "sqlglot.dialects.starrocks.StarRocks.Parser", "modulename": "sqlglot.dialects.starrocks", "qualname": "StarRocks.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n", "bases": "sqlglot.dialects.mysql.MySQL.Parser"}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"fullname": "sqlglot.dialects.starrocks.StarRocks.Generator", "modulename": "sqlglot.dialects.starrocks", "qualname": "StarRocks.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.mysql.MySQL.Generator"}, "sqlglot.dialects.tableau": {"fullname": "sqlglot.dialects.tableau", "modulename": "sqlglot.dialects.tableau", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.tableau.Tableau": {"fullname": "sqlglot.dialects.tableau.Tableau", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.tableau.Tableau.Generator": {"fullname": "sqlglot.dialects.tableau.Tableau.Generator", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"fullname": "sqlglot.dialects.tableau.Tableau.Generator.if_sql", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Generator.if_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.If) -> str:", "funcdef": "def"}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"fullname": "sqlglot.dialects.tableau.Tableau.Generator.count_sql", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Generator.count_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Count) -> str:", "funcdef": "def"}, "sqlglot.dialects.tableau.Tableau.Parser": {"fullname": "sqlglot.dialects.tableau.Tableau.Parser", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.teradata": {"fullname": "sqlglot.dialects.teradata", "modulename": "sqlglot.dialects.teradata", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.teradata.Teradata": {"fullname": "sqlglot.dialects.teradata.Teradata", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"fullname": "sqlglot.dialects.teradata.Teradata.Tokenizer", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.teradata.Teradata.Parser": {"fullname": "sqlglot.dialects.teradata.Teradata.Parser", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.teradata.Teradata.Generator": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.partitionedbyproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.PartitionedByProperty) -> str:", "funcdef": "def"}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.update_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.update_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Update) -> str:", "funcdef": "def"}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.mod_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.mod_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Mod) -> str:", "funcdef": "def"}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.datatype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataType) -> str:", "funcdef": "def"}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.rangen_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RangeN) -> str:", "funcdef": "def"}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.createable_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.createable_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Create,\tlocations: dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]) -> str:", "funcdef": "def"}, "sqlglot.dialects.trino": {"fullname": "sqlglot.dialects.trino", "modulename": "sqlglot.dialects.trino", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.trino.Trino": {"fullname": "sqlglot.dialects.trino.Trino", "modulename": "sqlglot.dialects.trino", "qualname": "Trino", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.presto.Presto"}, "sqlglot.dialects.trino.Trino.Generator": {"fullname": "sqlglot.dialects.trino.Trino.Generator", "modulename": "sqlglot.dialects.trino", "qualname": "Trino.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.presto.Presto.Generator"}, "sqlglot.dialects.trino.Trino.Tokenizer": {"fullname": "sqlglot.dialects.trino.Trino.Tokenizer", "modulename": "sqlglot.dialects.trino", "qualname": "Trino.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.presto.Presto.Tokenizer"}, "sqlglot.dialects.tsql": {"fullname": "sqlglot.dialects.tsql", "modulename": "sqlglot.dialects.tsql", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"fullname": "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "generate_date_delta_with_unit_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.DateAdd | sqlglot.expressions.DateDiff) -> str:", "funcdef": "def"}, "sqlglot.dialects.tsql.TSQL": {"fullname": "sqlglot.dialects.tsql.TSQL", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"fullname": "sqlglot.dialects.tsql.TSQL.Tokenizer", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.tsql.TSQL.Parser": {"fullname": "sqlglot.dialects.tsql.TSQL.Parser", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.tsql.TSQL.Generator": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator.offset_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator.offset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Offset) -> str:", "funcdef": "def"}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator.systemtime_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SystemTime) -> str:", "funcdef": "def"}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator.returnsproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ReturnsProperty) -> str:", "funcdef": "def"}, "sqlglot.diff": {"fullname": "sqlglot.diff", "modulename": "sqlglot.diff", "kind": "module", "doc": "

Semantic Diff for SQL

by Iaroslav Zeigerman

Motivation

Software is constantly changing and evolving, and identifying what has changed and reviewing those changes is an integral part of the development process. SQL code is no exception to this.

Text-based diff tools such as git diff, when applied to a code base, have certain limitations. First, they can only detect insertions and deletions, not movements or updates of individual pieces of code. Second, such tools can only detect changes between lines of text, which is too coarse for something as granular and detailed as source code. Additionally, the outcome of such a diff depends on the underlying code formatting and yields different results if the formatting changes.

Consider the following diff generated by Git:

[Figure: diff generated by Git]

Semantically the query hasn't changed. The two arguments b and c have been swapped (moved), which has no impact on the output of the query. Yet Git replaced the whole affected expression along with a bulk of unrelated elements.

The alternative to text-based diffing is to compare Abstract Syntax Trees (AST) instead. The main advantage of ASTs is that they are a direct product of code parsing, which represents the underlying code structure at any desired level of granularity. Comparing ASTs can yield extremely precise diffs; changes such as code movements and updates can also be detected. Even more importantly, this approach facilitates additional use cases beyond eyeballing two versions of source code side by side.

The use cases I had in mind for SQL when I decided to embark on this journey of semantic diffing were the following:

  • Query similarity score. Identifying which parts the two queries have in common to automatically suggest opportunities for consolidation, creation of intermediate/staging tables, and so on.
  • Differentiating between cosmetic/structural changes and functional ones. For example, when a nested query is refactored into a common table expression (CTE), this kind of change doesn't have any functional impact on either the query or its outcome.
  • Automatic suggestions about the need to retroactively backfill data. This is especially important for pipelines that populate very large tables, for which restatement is a runtime-intensive procedure. The ability to discern between simple code movements and actual modifications can help assess the impact of a change and make suggestions accordingly.

The implementation discussed in this post is now a part of the SQLGlot library. You can find the complete source code in the diff.py module. The choice of SQLGlot was an obvious one due to its simple but powerful API, lack of external dependencies and, more importantly, its extensive list of supported SQL dialects.
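Before diving into the algorithm, here is a minimal usage sketch of the resulting API (illustrative only; it assumes sqlglot's top-level parse_one helper and the diff function exposed by the diff.py module mentioned above):

from sqlglot import parse_one
from sqlglot.diff import diff

# Parse the two versions of the query into ASTs and compute the edit script.
source = parse_one("SELECT a + b + c, d, e")
target = parse_one("SELECT a - b + c, e, f")

for edit in diff(source, target):
    # Each edit is one of the operations discussed later in this post,
    # e.g. Insert, Remove, Move, Update or Keep.
    print(type(edit).__name__)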

The Search for a Solution

When it comes to any diffing tool (not just a semantic one), the primary challenge is to match as many elements of the compared entities as possible. Once such a set of matching elements is available, deriving a sequence of changes becomes an easy task.

If our elements have unique identifiers associated with them (for example, an element's ID in the DOM), the matching problem is trivial. However, the SQL syntax trees that we are comparing have neither unique keys nor object identifiers that can be used for the purposes of matching. So, how are we supposed to find pairs of nodes that are related?

To better illustrate the problem, consider comparing the following SQL expressions: SELECT a + b + c, d, e and SELECT a - b + c, e, f. Matching individual nodes from the respective syntax trees can be visualized as follows:

Figure 1: Example of node matching for two SQL expression trees.

Looking at the figure above, we conclude that the following changes should be captured by our solution:

  • Inserted nodes: Sub and f. These are the nodes from the target AST which do not have a matching node in the source AST.
  • Removed nodes: Add and d. These are the nodes from the source AST which do not have a counterpart in the target AST.
  • Remaining nodes must be identified as unchanged.

It should be clear at this point that if we manage to match nodes in the source tree with their counterparts in the target tree, then computing the diff becomes a trivial matter.

Naïve Brute-Force

The naïve solution would be to try all possible permutations of node pair combinations and see which set of pairs performs best based on some type of heuristic. The runtime cost of such a solution quickly reaches escape velocity; if both trees had only 10 nodes each, the number of such sets would be approximately 10!^2 = 3.6M^2 ≈ 13 * 10^12. This is a very bad case of factorial complexity (to be precise, it's actually much worse, O(n!^2), but I couldn't come up with a name for it), so there is little need to explore this approach any further.
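As a quick sanity check of that estimate (a throwaway snippet, not part of the original post):

import math

# 10! squared: the number of candidate pair-sets for two 10-node trees.
print(math.factorial(10) ** 2)  # 13168189440000, roughly 13 * 10^12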

Myers Algorithm

After the naïve approach was proven to be infeasible, the next question I asked myself was "how does git diff work?". This question led me to discover the Myers diff algorithm [1]. This algorithm was designed to compare sequences of strings. At its core, it looks for the shortest path on a graph of possible edits that transform the first sequence into the second one, while heavily rewarding those paths that lead to the longest subsequences of unchanged elements. There's a lot of material out there describing this algorithm in greater detail; I found James Coglan's series of blog posts to be the most comprehensive.

Therefore, I had this "brilliant" (actually not) idea to transform trees into sequences by traversing them in topological order, and then apply the Myers algorithm to the resulting sequences while using a custom heuristic when checking the equality of two nodes. Unsurprisingly, comparing sequences of strings is quite different from comparing hierarchical tree structures, and by flattening trees into sequences we lose a lot of relevant context. This resulted in terrible performance of this algorithm on ASTs. It often matched completely unrelated nodes, even when the two trees were mostly the same, and produced extremely inaccurate lists of changes overall. After playing around with it a little and tweaking my equality heuristics to improve accuracy, I ultimately scrapped the whole implementation and went back to the drawing board.

Change Distiller

The algorithm I settled on in the end was Change Distiller, created by Fluri et al. [2], which in turn is an improvement over the core idea described by Chawathe et al. [3].

The algorithm consists of two high-level steps:

  1. Finding appropriate matchings between pairs of nodes that are part of the compared ASTs. Identifying what is meant by an "appropriate" matching is also a part of this step.
  2. Generating the so-called "edit script" from the matching set built in the first step. The edit script is a sequence of edit operations (for example, insert, remove, update, etc.) on individual tree nodes, such that when applied as transformations on the source AST, it eventually becomes the target AST. In general, the shorter the sequence, the better. The length of the edit script can be used to compare the performance of different algorithms, though this is not the only metric that matters.

The rest of this section is dedicated to the Python implementation of the steps above using the AST implementation provided by the SQLGlot library.

Building the Matching Set

Matching Leaves

We begin composing the matching set by matching the leaf nodes. Leaf nodes are the nodes that do not have any child nodes (such as literals, identifiers, etc.). In order to match them, we gather all the leaf nodes from the source tree and generate a cartesian product with all the leaves from the target tree, comparing the pairs created this way and assigning them a similarity score. During this stage, we also exclude pairs that don't pass basic matching criteria. Then, we pick the pairs that scored the highest while making sure that each node is matched no more than once.

Using the example provided at the beginning of the post, the process of building an initial set of candidate matchings can be seen in Figure 2.

Figure 2: Building a set of candidate matchings between leaf nodes. The third item in each triplet represents a similarity score between two nodes.

First, let's analyze the similarity score. Then, we'll discuss the matching criteria.

The similarity score proposed by Fluri et al. [2] is a dice coefficient applied to bigrams of the respective node values. A bigram is a sequence of two adjacent elements from a string, computed in a sliding window fashion:

def bigram(string):
    count = max(0, len(string) - 1)
    return [string[i : i + 2] for i in range(count)]
For reasons that will become clear shortly, we actually need to compute bigram histograms rather than just sequences:

from collections import defaultdict

def bigram_histo(string):
    count = max(0, len(string) - 1)
    bigram_histo = defaultdict(int)
    for i in range(count):
        bigram_histo[string[i : i + 2]] += 1
    return bigram_histo
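As a quick illustrative check (not from the original post), assuming the bigram_histo helper above:

# Bigrams of "SELECT": SE, EL, LE, EC, CT -- each occurring once.
print(dict(bigram_histo("SELECT")))  # {'SE': 1, 'EL': 1, 'LE': 1, 'EC': 1, 'CT': 1}

# A single-character string is too short to produce any bigrams.
print(dict(bigram_histo("a")))  # {}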

The dice coefficient formula looks like the following:

dice(X, Y) = 2 * |X ∩ Y| / (|X| + |Y|)

where X is the collection of bigrams of the source node and Y is the collection of bigrams of the target node. What this essentially does is count the number of bigram elements the two nodes have in common, multiply it by 2, and then divide by the total number of elements in both bigrams. This is where bigram histograms come in handy:

def dice_coefficient(source, target):
    source_histo = bigram_histo(source.sql())
    target_histo = bigram_histo(target.sql())

    total_grams = (
        sum(source_histo.values()) + sum(target_histo.values())
    )
    if not total_grams:
        return 1.0 if source == target else 0.0

    overlap_len = 0
    overlapping_grams = set(source_histo) & set(target_histo)
    for g in overlapping_grams:
        overlap_len += min(source_histo[g], target_histo[g])

    return 2 * overlap_len / total_grams

To compute a bigram given a tree node, we first transform the node into its canonical SQL representation, so that the Literal(123) node becomes just "123" and the Identifier("a") node becomes just "a". We also handle the scenario in which strings are too short to derive bigrams; in this case, we fall back to checking the two nodes for equality.
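To make the scoring concrete, here is a small worked example (again illustrative; it reuses the dice_coefficient helper above together with sqlglot's parse_one):

from sqlglot import parse_one

# The literals 123 and 124 render to "123" and "124", which share one bigram
# ("12") out of 2 + 2 bigrams in total, so the score is 2 * 1 / (2 + 2) = 0.5.
print(dice_coefficient(parse_one("123"), parse_one("124")))  # 0.5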

Now that we know how to compute the similarity score, we can take care of the matching criteria for leaf nodes. In the original paper [2], the matching criteria are formalized as follows:

[Formula: matching criteria for leaf nodes]

The two nodes are matched if two conditions are met:

  1. The node labels match (in our case labels are just node types).
  2. The similarity score for node values is greater than or equal to some threshold "f". The authors of the paper recommend setting the value of "f" to 0.6.

With the building blocks in place, we can now build a matching set for leaf nodes. First, we generate a list of candidates for matching:

from heapq import heappush, heappop

candidate_matchings = []
source_leaves = _get_leaves(self._source)
target_leaves = _get_leaves(self._target)
for source_leaf in source_leaves:
    for target_leaf in target_leaves:
        if _is_same_type(source_leaf, target_leaf):
            similarity_score = dice_coefficient(
                source_leaf, target_leaf
            )
            if similarity_score >= 0.6:
                heappush(
                    candidate_matchings,
                    (
                        -similarity_score,
                        len(candidate_matchings),
                        source_leaf,
                        target_leaf,
                    ),
                )

In the implementation above, we push each matching pair onto the heap to automatically maintain the correct order based on the assigned similarity score (the score is negated because heapq implements a min-heap, so the best-scoring pair ends up at the top).

Finally, we build the initial matching set by picking leaf pairs with the highest score:

matching_set = set()
while candidate_matchings:
    _, _, source_leaf, target_leaf = heappop(candidate_matchings)
    if (
        source_leaf in unmatched_source_nodes
        and target_leaf in unmatched_target_nodes
    ):
        matching_set.add((source_leaf, target_leaf))
        unmatched_source_nodes.remove(source_leaf)
        unmatched_target_nodes.remove(target_leaf)

To finalize the matching set, we should now proceed with matching inner nodes.

\n\n

Matching Inner Nodes

\n\n

Matching inner nodes is quite similar to matching leaf nodes, with the following two distinctions:

\n\n
    \n
  • Rather than ranking a set of possible candidates, we pick the first node pair that passes the matching criteria.
  • \n
  • The matching criteria itself has been extended to account for the number of leaf nodes the pair of inner nodes have in common.
  • \n
\n\n

Figure 3: Matching inner nodes based on their type as well as how many of their leaf nodes have been previously matched.

\n\n

Let's start with the matching criteria, which are formalized as follows:

\n\n

[Image: matching criteria for inner nodes]

\n\n

Alongside the already familiar similarity score and node type criteria, there is a new one in the middle: the ratio of leaf nodes that the two nodes have in common must exceed some threshold “t”. The recommended value for “t” is also 0.6. Counting the number of common leaf nodes is pretty straightforward, since we already have the complete matching set for leaves. All we need to do is count how many matching pairs the leaf nodes of the two compared inner nodes form.

\n\n

There are two additional heuristics associated with this matching criteria:

\n\n
    \n
  • Inner node similarity weighting: if the similarity score between the node values doesn\u2019t pass the threshold \u201cf\u201d but the ratio of common leaf nodes (\u201ct\u201d) is greater than or equal to 0.8, then the matching is considered successful.
  • \n
  • The threshold “t” is reduced to 0.4 for inner nodes with 4 or fewer leaf nodes, in order to decrease the false negative rate for small subtrees.
  • \n
\n\n

We now only have to iterate through the remaining unmatched nodes and form matching pairs based on the outlined criteria:

\n\n
\n
leaves_matching_set = matching_set.copy()\n\nfor source_node in unmatched_source_nodes.copy():\n    for target_node in unmatched_target_nodes:\n        if _is_same_type(source_node, target_node):\n            source_leaves = set(_get_leaves(source_node))\n            target_leaves = set(_get_leaves(target_node))\n\n            max_leaves_num = max(len(source_leaves), len(target_leaves))\n            if max_leaves_num:\n                common_leaves_num = sum(\n                    1 if s in source_leaves and t in target_leaves else 0\n                    for s, t in leaves_matching_set\n                )\n                leaf_similarity_score = common_leaves_num / max_leaves_num\n            else:\n                leaf_similarity_score = 0.0\n\n            adjusted_t = (\n                0.6\n                if min(len(source_leaves), len(target_leaves)) > 4\n                else 0.4\n            )\n\n            if leaf_similarity_score >= 0.8 or (\n                leaf_similarity_score >= adjusted_t\n                and dice_coefficient(source_node, target_node) >= 0.6\n            ):\n                matching_set.add((source_node, target_node))\n                unmatched_source_nodes.remove(source_node)\n                unmatched_target_nodes.remove(target_node)\n                break\n
\n
\n\n

After the matching set is formed, we can proceed with generation of the edit script, which will be the algorithm\u2019s output.

\n\n

Generating the Edit Script

\n\n

At this point, we should have the following 3 sets at our disposal:

\n\n
    \n
  • The set of matched node pairs.
  • \n
  • The set of remaining unmatched nodes from the source tree.
  • \n
  • The set of remaining unmatched nodes from the target tree.
  • \n
\n\n

We can derive 3 kinds of edits from the matching set: either the node\u2019s value was updated (Update), the node was moved to a different position within the tree (Move), or the node remained unchanged (Keep). Note that the Move case is not mutually exclusive with the other two. The node could have been updated or could have remained the same while at the same time its position within its parent node or the parent node itself could have changed. All unmatched nodes from the source tree are the ones that were removed (Remove), while unmatched nodes from the target tree are the ones that were inserted (Insert).

\n\n
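For reference, the five edit kinds can be modeled as simple value classes. The sketch below mirrors the constructor signatures documented for sqlglot.diff (Insert, Remove and Move wrap a single expression, while Update and Keep relate a source node to a target node), but it is not the actual implementation:

from dataclasses import dataclass
from sqlglot.expressions import Expression

@dataclass
class Insert:
    expression: Expression

@dataclass
class Remove:
    expression: Expression

@dataclass
class Move:
    expression: Expression

@dataclass
class Update:
    source: Expression
    target: Expression

@dataclass
class Keep:
    source: Expression
    target: Expression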

The latter two cases are pretty straightforward to implement:

\n\n
\n
edit_script = []\n\nfor removed_node in unmatched_source_nodes:\n    edit_script.append(Remove(removed_node))\nfor inserted_node in unmatched_target_nodes:\n    edit_script.append(Insert(inserted_node))\n
\n
\n\n

Traversing the matching set requires a little more thought:

\n\n
\n
for source_node, target_node in matching_set:\n    if (\n        not isinstance(source_node, LEAF_EXPRESSION_TYPES)\n        or source_node == target_node\n    ):\n        move_edits = generate_move_edits(\n            source_node, target_node, matching_set\n        )\n        edit_script.extend(move_edits)\n        edit_script.append(Keep(source_node, target_node))\n    else:\n        edit_script.append(Update(source_node, target_node))\n
\n
\n\n

If a matching pair represents a pair of leaf nodes, we check whether they are the same to decide if an update took place. For inner node pairs, we also need to compare the positions of their respective children to detect node movements. Chawathe et al. [3] suggest applying the longest common subsequence (LCS) algorithm which, no surprise here, was described by Myers himself [1]. There is a small catch, however: instead of checking the equality of two child nodes, we need to check whether the two nodes form a pair that is part of our matching set.

\n\n

Now with this knowledge, the implementation becomes straightforward:

\n\n
\n
def generate_move_edits(source, target, matching_set):\n    source_children = _get_child_nodes(source)\n    target_children = _get_child_nodes(target)\n\n    lcs = set(\n        _longest_common_subsequence(\n            source_children,\n            target_children,\n            lambda l, r: (l, r) in matching_set\n        )\n    )\n\n    move_edits = []\n    for node in source_children:\n        if node not in lcs and node not in unmatched_source_nodes:\n            move_edits.append(Move(node))\n\n    return move_edits\n
\n
\n\n

I left out the implementation of the LCS algorithm itself here, but there are plenty of implementations that can easily be looked up; a simple version is sketched below.

\n\n
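Here is what such a simple version could look like: a textbook dynamic-programming LCS in which elements are compared with a caller-supplied predicate instead of the equality operator. The function name matches the one used above, but the body is my own sketch rather than the exact code in sqlglot.diff:

def _longest_common_subsequence(seq_a, seq_b, equal):
    # Build the classic LCS length table in O(len(seq_a) * len(seq_b)).
    lengths = [[0] * (len(seq_b) + 1) for _ in range(len(seq_a) + 1)]
    for i, a in enumerate(seq_a):
        for j, b in enumerate(seq_b):
            if equal(a, b):
                lengths[i + 1][j + 1] = lengths[i][j] + 1
            else:
                lengths[i + 1][j + 1] = max(lengths[i][j + 1], lengths[i + 1][j])

    # Walk the table backwards to recover one longest common subsequence.
    subsequence = []
    i, j = len(seq_a), len(seq_b)
    while i > 0 and j > 0:
        if equal(seq_a[i - 1], seq_b[j - 1]):
            subsequence.append(seq_a[i - 1])
            i -= 1
            j -= 1
        elif lengths[i - 1][j] >= lengths[i][j - 1]:
            i -= 1
        else:
            j -= 1

    return list(reversed(subsequence))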

Output

\n\n

The implemented algorithm produces output that resembles the following:

\n\n
\n
>>> from sqlglot import parse_one, diff\n>>> diff(parse_one("SELECT a + b + c, d, e"), parse_one("SELECT a - b + c, e, f"))\n\nRemove(Add)\nRemove(Column(d))\nRemove(Identifier(d))\nInsert(Sub)\nInsert(Column(f))\nInsert(Identifier(f))\nKeep(Select, Select)\nKeep(Add, Add)\nKeep(Column(a), Column(a))\nKeep(Identifier(a), Identifier(a))\nKeep(Column(b), Column(b))\nKeep(Identifier(b), Identifier(b))\nKeep(Column(c), Column(c))\nKeep(Identifier(c), Identifier(c))\nKeep(Column(e), Column(e))\nKeep(Identifier(e), Identifier(e))\n
\n
\n\n

Note that the output above is abbreviated. The string representation of actual AST nodes is significantly more verbose.

\n\n

The implementation works especially well when coupled with SQLGlot's query optimizer, which can be used to produce canonical representations of compared queries:

\n\n
\n
>>> schema={"t": {"a": "INT", "b": "INT", "c": "INT", "d": "INT"}}\n>>> source = """\n... SELECT 1 + 1 + a\n... FROM t\n... WHERE b = 1 OR (c = 2 AND d = 3)\n... """\n>>> target = """\n... SELECT 2 + a\n... FROM t\n... WHERE (b = 1 OR c = 2) AND (b = 1 OR d = 3)\n... """\n>>> optimized_source = optimize(parse_one(source), schema=schema)\n>>> optimized_target = optimize(parse_one(target), schema=schema)\n>>> edit_script = diff(optimized_source, optimized_target)\n>>> sum(0 if isinstance(e, Keep) else 1 for e in edit_script)\n0\n
\n
\n\n

Optimizations

\n\n

The worst case runtime complexity of this algorithm is not exactly stellar: O(n^2 * log n^2). This is because of the leaf matching process, which involves ranking a cartesian product between all leaf nodes of compared trees. Unsurprisingly, the algorithm takes a considerable time to finish for bigger queries.

\n\n

There are still a few basic things we can do in our implementation to help improve performance:

\n\n
    \n
  • Refer to individual node objects using their identifiers (Python\u2019s id()) instead of direct references in sets. This helps avoid costly recursive hash calculations and equality checks.
  • \n
  • Cache bigram histograms to avoid computing them more than once for the same node.
  • \n
  • Compute the canonical SQL string representation for each tree once while caching string representations of all inner nodes. This prevents redundant tree traversals when bigrams are computed.
  • \n
\n\n

At the time of writing, only the first two optimizations have been implemented, so there is an opportunity to contribute for anyone who's interested.

\n\n
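To illustrate, the first two optimizations can be combined into something like the following. The names are mine, and the actual code in sqlglot.diff is organized differently:

# Cache bigram histograms by node identity: id() avoids recursively hashing
# and comparing whole subtrees, and the cache guarantees that each histogram
# is computed at most once per node.
_bigram_histo_cache = {}

def cached_bigram_histo(node):
    histo = _bigram_histo_cache.get(id(node))
    if histo is None:
        histo = bigram_histo(node.sql())
        _bigram_histo_cache[id(node)] = histo
    return histo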

Alternative Solutions

\n\n

This section is dedicated to solutions that I\u2019ve investigated, but haven\u2019t tried.

\n\n

First, this section wouldn't be complete without Tristan Hume's blog post. Tristan's solution has a lot in common with the Myers algorithm, plus heuristics that are much more clever than what I came up with. The implementation relies on a combination of dynamic programming and the A* search algorithm to explore the space of possible matchings and pick the best ones. It seemed to have worked well for Tristan's specific use case, but after my negative experience with the Myers algorithm, I decided to try something different.

\n\n

Another notable approach is the Gumtree algorithm by Falleri et al. [4]. I discovered this paper after I'd already implemented the algorithm that is the main focus of this post. In sections 5.2 and 5.3 of their paper, the authors compare the two algorithms side by side and claim that Gumtree is significantly better in terms of both runtime performance and accuracy when evaluated on 12 792 pairs of Java source files. This doesn't surprise me, as the algorithm takes the height of subtrees into account. In my tests, I definitely saw scenarios in which this context would have helped. On top of that, the authors promise O(n^2) worst-case runtime complexity which, given Change Distiller's O(n^2 * log n^2), looks particularly tempting. I hope to try this algorithm out at some point, and there is a good chance you'll see me writing about it in future posts.

\n\n

Conclusion

\n\n

The Change Distiller algorithm yielded quite satisfactory results in most of my tests. The scenarios in which it fell short mostly concerned identical (or very similar) subtrees located in different parts of the AST. In those cases, node mismatches were frequent and, as a result, edit scripts were somewhat suboptimal.

\n\n

Additionally, the runtime performance of the algorithm leaves a lot to be desired. On trees with 1000 leaf nodes each, the algorithm takes a little under 2 seconds to complete. My implementation still has room for improvement, but this should give you a rough idea of what to expect. It appears that the Gumtree algorithm [4] can help address both of these points. I hope to find bandwidth to work on it soon and then compare the two algorithms side-by-side to find out which one performs better on SQL specifically. In the meantime, Change Distiller definitely gets the job done, and I can now proceed with applying it to some of the use cases I mentioned at the beginning of this post.

\n\n

I\u2019m also curious to learn whether other folks in the industry faced a similar problem, and how they approached it. If you did something similar, I\u2019m interested to hear about your experience.

\n\n

References

\n\n

[1] Eugene W. Myers. An O(ND) Difference Algorithm and Its Variations. Algorithmica 1(2): 251-266 (1986)

\n\n

[2] B. Fluri, M. Wursch, M. Pinzger, and H. Gall. Change Distilling: Tree differencing for fine-grained source code change extraction. IEEE Trans. Software Eng., 33(11):725\u2013743, 2007.

\n\n

[3] S.S. Chawathe, A. Rajaraman, H. Garcia-Molina, and J. Widom. Change Detection in Hierarchically Structured Information. Proc. ACM Sigmod Int\u2019l Conf. Management of Data, pp. 493-504, June 1996

\n\n

[4] Jean-R\u00e9my Falleri, Flor\u00e9al Morandat, Xavier Blanc, Matias Martinez, Martin Monperrus. Fine-grained and Accurate Source Code Differencing. Proceedings of the International Conference on Automated Software Engineering, 2014, V\u00e4steras, Sweden. pp.313-324, 10.1145/2642937.2642982. hal-01054552

\n\n
\n"}, "sqlglot.diff.Insert": {"fullname": "sqlglot.diff.Insert", "modulename": "sqlglot.diff", "qualname": "Insert", "kind": "class", "doc": "

Indicates that a new node has been inserted

\n"}, "sqlglot.diff.Insert.__init__": {"fullname": "sqlglot.diff.Insert.__init__", "modulename": "sqlglot.diff", "qualname": "Insert.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.diff.Remove": {"fullname": "sqlglot.diff.Remove", "modulename": "sqlglot.diff", "qualname": "Remove", "kind": "class", "doc": "

Indicates that an existing node has been removed

\n"}, "sqlglot.diff.Remove.__init__": {"fullname": "sqlglot.diff.Remove.__init__", "modulename": "sqlglot.diff", "qualname": "Remove.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.diff.Move": {"fullname": "sqlglot.diff.Move", "modulename": "sqlglot.diff", "qualname": "Move", "kind": "class", "doc": "

Indicates that an existing node's position within the tree has changed

\n"}, "sqlglot.diff.Move.__init__": {"fullname": "sqlglot.diff.Move.__init__", "modulename": "sqlglot.diff", "qualname": "Move.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.diff.Update": {"fullname": "sqlglot.diff.Update", "modulename": "sqlglot.diff", "qualname": "Update", "kind": "class", "doc": "

Indicates that an existing node has been updated

\n"}, "sqlglot.diff.Update.__init__": {"fullname": "sqlglot.diff.Update.__init__", "modulename": "sqlglot.diff", "qualname": "Update.__init__", "kind": "function", "doc": "

\n", "signature": "(\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression)"}, "sqlglot.diff.Keep": {"fullname": "sqlglot.diff.Keep", "modulename": "sqlglot.diff", "qualname": "Keep", "kind": "class", "doc": "

Indicates that an existing node hasn't been changed

\n"}, "sqlglot.diff.Keep.__init__": {"fullname": "sqlglot.diff.Keep.__init__", "modulename": "sqlglot.diff", "qualname": "Keep.__init__", "kind": "function", "doc": "

\n", "signature": "(\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression)"}, "sqlglot.diff.diff": {"fullname": "sqlglot.diff.diff", "modulename": "sqlglot.diff", "qualname": "diff", "kind": "function", "doc": "

Returns the list of changes between the source and the target expressions.

\n\n
Examples:
\n\n
\n
\n
>>> diff(parse_one("a + b"), parse_one("a + c"))\n[\n    Remove(expression=(COLUMN this: (IDENTIFIER this: b, quoted: False))),\n    Insert(expression=(COLUMN this: (IDENTIFIER this: c, quoted: False))),\n    Keep(\n        source=(ADD this: ...),\n        target=(ADD this: ...)\n    ),\n    Keep(\n        source=(COLUMN this: (IDENTIFIER this: a, quoted: False)),\n        target=(COLUMN this: (IDENTIFIER this: a, quoted: False))\n    ),\n]\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • source: the source expression.
  • \n
  • target: the target expression against which the diff should be calculated.
  • \n
  • matchings: the list of pre-matched node pairs which is used to help the algorithm's\nheuristics produce better results for subtrees that are known by a caller to be matching.\nNote: expression references in this list must refer to the same node objects that are\nreferenced in source / target trees.
  • \n
\n\n
Returns:
\n\n
\n

the list of Insert, Remove, Move, Update and Keep objects for each node in the source and the\n target expression trees. This list represents a sequence of steps needed to transform the source\n expression tree into the target one.

\n
\n", "signature": "(\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression,\tmatchings: Optional[List[Tuple[sqlglot.expressions.Expression, sqlglot.expressions.Expression]]] = None,\t**kwargs: Any) -> List[Union[sqlglot.diff.Insert, sqlglot.diff.Remove, sqlglot.diff.Move, sqlglot.diff.Update, sqlglot.diff.Keep]]:", "funcdef": "def"}, "sqlglot.diff.ChangeDistiller": {"fullname": "sqlglot.diff.ChangeDistiller", "modulename": "sqlglot.diff", "qualname": "ChangeDistiller", "kind": "class", "doc": "

The implementation of the Change Distiller algorithm described by Beat Fluri and Martin Pinzger in\ntheir paper https://ieeexplore.ieee.org/document/4339230, which in turn is based on the algorithm by\nChawathe et al. described in http://ilpubs.stanford.edu:8090/115/1/1995-46.pdf.

\n"}, "sqlglot.diff.ChangeDistiller.__init__": {"fullname": "sqlglot.diff.ChangeDistiller.__init__", "modulename": "sqlglot.diff", "qualname": "ChangeDistiller.__init__", "kind": "function", "doc": "

\n", "signature": "(f: float = 0.6, t: float = 0.6)"}, "sqlglot.diff.ChangeDistiller.diff": {"fullname": "sqlglot.diff.ChangeDistiller.diff", "modulename": "sqlglot.diff", "qualname": "ChangeDistiller.diff", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression,\tmatchings: Optional[List[Tuple[sqlglot.expressions.Expression, sqlglot.expressions.Expression]]] = None) -> List[Union[sqlglot.diff.Insert, sqlglot.diff.Remove, sqlglot.diff.Move, sqlglot.diff.Update, sqlglot.diff.Keep]]:", "funcdef": "def"}, "sqlglot.errors": {"fullname": "sqlglot.errors", "modulename": "sqlglot.errors", "kind": "module", "doc": "

\n"}, "sqlglot.errors.ErrorLevel": {"fullname": "sqlglot.errors.ErrorLevel", "modulename": "sqlglot.errors", "qualname": "ErrorLevel", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.errors.ErrorLevel.IGNORE": {"fullname": "sqlglot.errors.ErrorLevel.IGNORE", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.IGNORE", "kind": "variable", "doc": "

Ignore all errors.

\n", "default_value": "<ErrorLevel.IGNORE: 'IGNORE'>"}, "sqlglot.errors.ErrorLevel.WARN": {"fullname": "sqlglot.errors.ErrorLevel.WARN", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.WARN", "kind": "variable", "doc": "

Log all errors.

\n", "default_value": "<ErrorLevel.WARN: 'WARN'>"}, "sqlglot.errors.ErrorLevel.RAISE": {"fullname": "sqlglot.errors.ErrorLevel.RAISE", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.RAISE", "kind": "variable", "doc": "

Collect all errors and raise a single exception.

\n", "default_value": "<ErrorLevel.RAISE: 'RAISE'>"}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"fullname": "sqlglot.errors.ErrorLevel.IMMEDIATE", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.IMMEDIATE", "kind": "variable", "doc": "

Immediately raise an exception on the first error found.

\n", "default_value": "<ErrorLevel.IMMEDIATE: 'IMMEDIATE'>"}, "sqlglot.errors.SqlglotError": {"fullname": "sqlglot.errors.SqlglotError", "modulename": "sqlglot.errors", "qualname": "SqlglotError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "builtins.Exception"}, "sqlglot.errors.UnsupportedError": {"fullname": "sqlglot.errors.UnsupportedError", "modulename": "sqlglot.errors", "qualname": "UnsupportedError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.ParseError": {"fullname": "sqlglot.errors.ParseError", "modulename": "sqlglot.errors", "qualname": "ParseError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.ParseError.__init__": {"fullname": "sqlglot.errors.ParseError.__init__", "modulename": "sqlglot.errors", "qualname": "ParseError.__init__", "kind": "function", "doc": "

\n", "signature": "(message: str, errors: Optional[List[Dict[str, Any]]] = None)"}, "sqlglot.errors.ParseError.new": {"fullname": "sqlglot.errors.ParseError.new", "modulename": "sqlglot.errors", "qualname": "ParseError.new", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tmessage: str,\tdescription: Optional[str] = None,\tline: Optional[int] = None,\tcol: Optional[int] = None,\tstart_context: Optional[str] = None,\thighlight: Optional[str] = None,\tend_context: Optional[str] = None,\tinto_expression: Optional[str] = None) -> sqlglot.errors.ParseError:", "funcdef": "def"}, "sqlglot.errors.TokenError": {"fullname": "sqlglot.errors.TokenError", "modulename": "sqlglot.errors", "qualname": "TokenError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.OptimizeError": {"fullname": "sqlglot.errors.OptimizeError", "modulename": "sqlglot.errors", "qualname": "OptimizeError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.SchemaError": {"fullname": "sqlglot.errors.SchemaError", "modulename": "sqlglot.errors", "qualname": "SchemaError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.ExecuteError": {"fullname": "sqlglot.errors.ExecuteError", "modulename": "sqlglot.errors", "qualname": "ExecuteError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.concat_messages": {"fullname": "sqlglot.errors.concat_messages", "modulename": "sqlglot.errors", "qualname": "concat_messages", "kind": "function", "doc": "

\n", "signature": "(errors: Sequence[Any], maximum: int) -> str:", "funcdef": "def"}, "sqlglot.errors.merge_errors": {"fullname": "sqlglot.errors.merge_errors", "modulename": "sqlglot.errors", "qualname": "merge_errors", "kind": "function", "doc": "

\n", "signature": "(errors: Sequence[sqlglot.errors.ParseError]) -> List[Dict[str, Any]]:", "funcdef": "def"}, "sqlglot.executor": {"fullname": "sqlglot.executor", "modulename": "sqlglot.executor", "kind": "module", "doc": "

Writing a Python SQL engine from scratch

\n\n

Toby Mao

\n\n

Introduction

\n\n

When I first started writing SQLGlot in early 2021, my goal was just to translate SQL queries from SparkSQL to Presto and vice versa. However, over the last year and a half, I've ended up with a full-fledged SQL engine. SQLGlot can now parse and transpile between 18 SQL dialects and can execute all 24 TPC-H SQL queries. The parser and engine are all written from scratch using Python.

\n\n

This post will cover why I went through the effort of creating a Python SQL engine and how a simple query goes from a string to actually transforming data. The following steps are briefly summarized:

\n\n\n\n

Why?

\n\n

I started working on SQLGlot because of my work on the experimentation and metrics platform at Netflix, where I built tools that allowed data scientists to define and compute SQL-based metrics. Netflix relied on multiple engines to query data (Spark, Presto, and Druid), so my team built the metrics platform around PyPika, a Python SQL query builder. This way, definitions could be reused across multiple engines. However, it quickly became apparent that writing Python code to programmatically generate SQL was challenging for data scientists, especially those with academic backgrounds, since they were mostly familiar with R and SQL. At the time, the only Python SQL parser was sqlparse, which is not actually a parser but a tokenizer, so having users write raw SQL into the platform wasn't really an option. Some time later, I randomly stumbled across Crafting Interpreters and realized that I could use it as a guide towards creating my own SQL parser/transpiler.

\n\n

Why did I do this? Isn't a Python SQL engine going to be extremely slow?

\n\n

The main reason why I ended up building a SQL engine was...just for entertainment. It's been fun learning about all the things required to actually run a SQL query, and seeing it actually work is extremely rewarding. Before SQLGlot, I had zero experience with lexers, parsers, or compilers.

\n\n

In terms of practical use cases, I planned to use the Python SQL engine for unit testing SQL pipelines. Big data pipelines are tough to test because many of the engines are not open source and cannot be run locally. With SQLGlot, you can take a SQL query targeting a warehouse such as Snowflake and seamlessly run it in CI on mock Python data. It's easy to mock data and create arbitrary UDFs because everything is just Python. Although the implementation is slow and unsuitable for large amounts of data (> 1 million rows), there's very little overhead/startup and you can run queries on test data in a couple of milliseconds.

\n\n

Finally, the components that have been built to support execution can be used as a foundation for a faster engine. I'm inspired by what Apache Calcite has done for the JVM world. Even though Python is commonly used for data, there hasn't been a Calcite for Python. So, you could say that SQLGlot aims to be that framework. For example, it wouldn't take much work to replace the Python execution engine with numpy/pandas/arrow to become a respectably-performing query engine. The implementation would be able to leverage the parser, optimizer, and logical planner, only needing to implement physical execution. There is a lot of work in the Python ecosystem around high performance vectorized computation, which I think could benefit from a pure Python-based AST/plan. Parsing and planning don't have to be fast when the bottleneck of running queries is processing terabytes of data. So, having a Python-based ecosystem around SQL is beneficial given the ease of development in Python, despite not having bare metal performance.

\n\n

Parts of SQLGlot's toolkit are being used today by the following:

\n\n
    \n
  • Ibis: A Python library that provides a lightweight, universal interface for data wrangling.\n
      \n
    • Uses the Python SQL expression builder and leverages the optimizer/planner to convert SQL into dataframe operations.
    • \n
  • \n
  • mysql-mimic: Pure-Python implementation of the MySQL server wire protocol\n
      \n
    • Parses / transforms SQL and executes INFORMATION_SCHEMA queries.
    • \n
  • \n
  • Quokka: Push-based vectorized query engine\n
      \n
    • Parses and optimizes SQL.
    • \n
  • \n
  • Splink: Fast, accurate and scalable probabilistic data linkage using your choice of SQL backend.\n
      \n
    • Transpiles queries.
    • \n
  • \n
\n\n

How?

\n\n

There are many steps involved with actually running a simple query like:

\n\n
\n
SELECT\n  bar.a,\n  b + 1 AS b\nFROM bar\nJOIN baz\n  ON bar.a = baz.a\nWHERE bar.a > 1\n
\n
\n\n

In this post, I'll walk through all the steps SQLGlot takes to run this query over Python objects.

\n\n

Tokenizing

\n\n

The first step is to convert the SQL string into a list of tokens. SQLGlot's tokenizer is quite simple and can be found here. In a while loop, it checks each character and either appends the character to the current token or makes a new token.

\n\n

Running the SQLGlot tokenizer shows the output.

\n\n
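You can also produce the tokens for the query above yourself with something like the snippet below (output abbreviated; the exact Token fields may differ between versions):

from sqlglot.tokens import Tokenizer

sql = """
SELECT
  bar.a,
  b + 1 AS b
FROM bar
JOIN baz
  ON bar.a = baz.a
WHERE bar.a > 1
"""

# Print the first few tokens produced by the tokenizer.
for token in Tokenizer().tokenize(sql)[:5]:
    print(token)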

[Image: tokenizer output]

\n\n

Each keyword has been converted to a SQLGlot Token object. Each token has some metadata associated with it, like line/column information for error messages. Comments are also a part of the token, so that comments can be preserved.

\n\n

Parsing

\n\n

Once a SQL statement is tokenized, we don't need to worry about white space and other formatting, so it's easier to work with. We can now convert the list of tokens into an AST. The SQLGlot parser is a handwritten recursive descent parser.

\n\n

Similar to the tokenizer, it consumes the tokens sequentially, but it instead uses a recursive algorithm. The tokens are converted into a single AST node that represents the SQL query. The SQLGlot parser was designed to support various dialects, so it contains many options for overriding parsing functionality.

\n\n
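To see the parser in action, you can build the AST for the query above and turn it back into SQL. This is just a quick sketch; the printed node representation is fairly verbose in practice:

from sqlglot import parse_one

ast = parse_one("SELECT bar.a, b + 1 AS b FROM bar JOIN baz ON bar.a = baz.a WHERE bar.a > 1")

print(repr(ast))  # the nested Expression nodes that make up the AST
print(ast.sql())  # regenerate SQL from the AST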

[Image: parser output]

\n\n

The AST is a generic representation of a given SQL query. Each dialect can override or implement its own generator, which can convert an AST object into syntactically correct SQL.

\n\n

Optimizing

\n\n

Once we have our AST, we can transform it into an equivalent query that produces the same results more efficiently. When optimizing queries, most engines first convert the AST into a logical plan and then optimize the plan. However, I chose to optimize the AST directly for the following reasons:

\n\n
    \n
  1. It's easier to debug and validate the optimizations when the input and output are both SQL.
  2. Rules can be applied a la carte to transform SQL into a more desirable form.
  3. I wanted a way to generate 'canonical sql'. Having a canonical representation of SQL is useful for understanding if two queries are semantically equivalent (e.g. SELECT 1 + 1 and SELECT 2).
\n\n

I've yet to find another engine that takes this approach, but I'm quite happy with this decision. The optimizer currently does not perform any \"physical optimizations\" such as join reordering. Those are left to the execution layer, as additional statistics and information could become relevant.

\n\n

[Image: optimizer output]

\n\n

The optimizer currently has 17 rules. Each of these rules is applied, transforming the AST in place. The combination of these rules creates \"canonical\" sql that can then be more easily converted into a logical plan and executed.

\n\n
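All of the rules are applied by the top-level optimize entry point, which takes an expression and, optionally, a schema. A small usage sketch (the table and schema are made up):

from sqlglot import parse_one
from sqlglot.optimizer import optimize

optimized = optimize(
    parse_one("SELECT * FROM x WHERE 1 = 1 AND a > 1"),
    schema={"x": {"a": "INT", "b": "INT"}},
)
print(optimized.sql(pretty=True))  # fully qualified, simplified "canonical" SQL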

Some example rules are:

\n\n

qualify_tables and qualify_columns

\n\n
    \n
  • Adds all db/catalog qualifiers to tables and forces an alias.
  • \n
  • Ensures each column is unambiguous and expands stars.
  • \n
\n\n
\n
SELECT * FROM x;\n\nSELECT "db"."x" AS "x";\n
\n
\n\n

simplify

\n\n

Boolean and math simplification. Check out all the test cases.

\n\n
\n
((NOT FALSE) AND (x = x)) AND (TRUE OR 1 <> 3);\nx = x;\n\n1 + 1;\n2;\n
\n
\n\n

normalize

\n\n

Attempts to convert all predicates into conjunctive normal form.

\n\n
\n
-- DNF\n(A AND B) OR (B AND C AND D);\n\n-- CNF\n(A OR C) AND (A OR D) AND B;\n
\n
\n\n

unnest_subqueries

\n\n

Converts subqueries in predicates into joins.

\n\n
\n
-- The subquery can be converted into a left join\nSELECT *\nFROM x AS x\nWHERE (\n  SELECT y.a AS a\n  FROM y AS y\n  WHERE x.a = y.a\n) = 1;\n\nSELECT *\nFROM x AS x\nLEFT JOIN (\n  SELECT y.a AS a\n  FROM y AS y\n  WHERE TRUE\n  GROUP BY y.a\n) AS "_u_0"\n  ON x.a = "_u_0".a\nWHERE ("_u_0".a = 1 AND NOT "_u_0".a IS NULL)\n
\n
\n\n

pushdown_predicates

\n\n

Push down filters into the innermost query.

\n\n
\n
SELECT *\nFROM (\n  SELECT *\n  FROM x AS x\n) AS y\nWHERE y.a = 1;\n\nSELECT *\nFROM (\n  SELECT *\n  FROM x AS x\n  WHERE y.a = 1\n) AS y WHERE TRUE\n
\n
\n\n

annotate_types

\n\n

Infer all types throughout the AST given schema information and function type definitions.

\n\n
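The rule can also be invoked on its own; a small sketch (the exact inferred type may be printed differently depending on the version):

from sqlglot import parse_one
from sqlglot.optimizer.annotate_types import annotate_types

annotated = annotate_types(parse_one("SELECT 1 + 2.5 AS x"))
print(annotated.expressions[0].type)  # the inferred DataType, e.g. DOUBLE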

Planning

\n\n

After the SQL AST has been "optimized", it's much easier to convert into a logical plan. The AST is traversed and converted into a DAG, where each node is one of five kinds of steps. The different steps are:

\n\n

Scan

\n\n

Selects columns from a table, applies projections, and finally filters the table.

\n\n

Sort

\n\n

Sorts a table for order by expressions.

\n\n

Set

\n\n

Applies the operators union/union all/except/intersect.

\n\n

Aggregate

\n\n

Applies an aggregation/group by.

\n\n

Join

\n\n

Joins multiple tables together.

\n\n

[Image: logical plan]

\n\n

The logical plan is quite simple and contains the information required to convert it into a physical plan (execution).

\n\n
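A plan can be built and inspected directly from an optimized expression; a small sketch (the table and schema are made up):

from sqlglot import parse_one
from sqlglot.optimizer import optimize
from sqlglot.planner import Plan

optimized = optimize(
    parse_one("SELECT a, COUNT(*) AS c FROM x GROUP BY a"),
    schema={"x": {"a": "INT"}},
)
plan = Plan(optimized)

print(plan.root)               # the final step of the DAG
print(plan.root.dependencies)  # the steps it depends on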

Executing

\n\n

Finally, we can actually execute the SQL query. The Python engine is not fast, but it's very small (~400 LOC)! It iterates the DAG with a queue and runs each step, passing each intermediary table to the next step.

\n\n

In order to keep things simple, it evaluates expressions with eval. Because SQLGlot was built primarily to be a transpiler, it was simple to create a \"Python SQL\" dialect. So a SQL expression x + 1 can just be converted into scope['x'] + 1.

\n\n
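Putting it all together, the whole pipeline is exposed through sqlglot.executor.execute. The sample rows below are made up for illustration:

from sqlglot.executor import execute

tables = {
    "bar": [{"a": 1, "b": 10}, {"a": 2, "b": 20}],
    "baz": [{"a": 2}],
}

result = execute(
    "SELECT bar.a, b + 1 AS b FROM bar JOIN baz ON bar.a = baz.a WHERE bar.a > 1",
    tables=tables,
)
print(result)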

[Image: executor output]

\n\n

What's next

\n\n

SQLGlot's main focus will always be on parsing/transpiling, but I plan to continue development on the execution engine. I'd like to pass TPC-DS. If someone doesn't beat me to it, I may even take a stab at writing a Pandas/Arrow execution engine.

\n\n

I'm hoping that over time, SQLGlot will spark the Python SQL ecosystem just like Calcite has for Java.

\n\n

Special thanks

\n\n

SQLGlot would not be what it is without its core contributors. In particular, the execution engine would not exist without Barak Alon and George Sittas.

\n\n

Get in touch

\n\n

If you'd like to chat more about SQLGlot, please join my Slack Channel!

\n\n
\n"}, "sqlglot.executor.execute": {"fullname": "sqlglot.executor.execute", "modulename": "sqlglot.executor", "qualname": "execute", "kind": "function", "doc": "

Run a sql query against data.

\n\n
Arguments:
\n\n
    \n
  • sql: a sql statement.
  • \n
  • schema: database schema.\nThis can either be an instance of Schema or a mapping in one of the following forms:\n
      \n
    1. {table: {col: type}}
    2. \n
    3. {db: {table: {col: type}}}
    4. \n
    5. {catalog: {db: {table: {col: type}}}}
    6. \n
  • \n
  • read: the SQL dialect to apply during parsing (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • tables: additional tables to register.
  • \n
\n\n
Returns:
\n\n
\n

Simple columnar data structure.

\n
\n", "signature": "(\tsql: str | sqlglot.expressions.Expression,\tschema: Union[Dict, sqlglot.schema.Schema, NoneType] = None,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\ttables: Optional[Dict] = None) -> sqlglot.executor.table.Table:", "funcdef": "def"}, "sqlglot.executor.context": {"fullname": "sqlglot.executor.context", "modulename": "sqlglot.executor.context", "kind": "module", "doc": "

\n"}, "sqlglot.executor.context.Context": {"fullname": "sqlglot.executor.context.Context", "modulename": "sqlglot.executor.context", "qualname": "Context", "kind": "class", "doc": "

Execution context for sql expressions.

\n\n

Context is used to hold relevant data tables which can then be queried on with eval.

\n\n

References to columns can either be scalar or vectors. When set_row is used, column references\nevaluate to scalars while set_range evaluates to vectors. This allows convenient and efficient\nevaluation of aggregation functions.

\n"}, "sqlglot.executor.context.Context.__init__": {"fullname": "sqlglot.executor.context.Context.__init__", "modulename": "sqlglot.executor.context", "qualname": "Context.__init__", "kind": "function", "doc": "

Args\n tables: representing the scope of the current execution context.\n env: dictionary of functions within the execution context.

\n", "signature": "(\ttables: Dict[str, sqlglot.executor.table.Table],\tenv: Optional[Dict] = None)"}, "sqlglot.executor.context.Context.eval": {"fullname": "sqlglot.executor.context.Context.eval", "modulename": "sqlglot.executor.context", "qualname": "Context.eval", "kind": "function", "doc": "

\n", "signature": "(self, code):", "funcdef": "def"}, "sqlglot.executor.context.Context.eval_tuple": {"fullname": "sqlglot.executor.context.Context.eval_tuple", "modulename": "sqlglot.executor.context", "qualname": "Context.eval_tuple", "kind": "function", "doc": "

\n", "signature": "(self, codes):", "funcdef": "def"}, "sqlglot.executor.context.Context.add_columns": {"fullname": "sqlglot.executor.context.Context.add_columns", "modulename": "sqlglot.executor.context", "qualname": "Context.add_columns", "kind": "function", "doc": "

\n", "signature": "(self, *columns: str) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.table_iter": {"fullname": "sqlglot.executor.context.Context.table_iter", "modulename": "sqlglot.executor.context", "qualname": "Context.table_iter", "kind": "function", "doc": "

\n", "signature": "(\tself,\ttable: str) -> Iterator[Tuple[sqlglot.executor.table.TableIter, sqlglot.executor.context.Context]]:", "funcdef": "def"}, "sqlglot.executor.context.Context.filter": {"fullname": "sqlglot.executor.context.Context.filter", "modulename": "sqlglot.executor.context", "qualname": "Context.filter", "kind": "function", "doc": "

\n", "signature": "(self, condition) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.sort": {"fullname": "sqlglot.executor.context.Context.sort", "modulename": "sqlglot.executor.context", "qualname": "Context.sort", "kind": "function", "doc": "

\n", "signature": "(self, key) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.set_row": {"fullname": "sqlglot.executor.context.Context.set_row", "modulename": "sqlglot.executor.context", "qualname": "Context.set_row", "kind": "function", "doc": "

\n", "signature": "(self, row: Tuple) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.set_index": {"fullname": "sqlglot.executor.context.Context.set_index", "modulename": "sqlglot.executor.context", "qualname": "Context.set_index", "kind": "function", "doc": "

\n", "signature": "(self, index: int) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.set_range": {"fullname": "sqlglot.executor.context.Context.set_range", "modulename": "sqlglot.executor.context", "qualname": "Context.set_range", "kind": "function", "doc": "

\n", "signature": "(self, start: int, end: int) -> None:", "funcdef": "def"}, "sqlglot.executor.env": {"fullname": "sqlglot.executor.env", "modulename": "sqlglot.executor.env", "kind": "module", "doc": "

\n"}, "sqlglot.executor.env.reverse_key": {"fullname": "sqlglot.executor.env.reverse_key", "modulename": "sqlglot.executor.env", "qualname": "reverse_key", "kind": "class", "doc": "

\n"}, "sqlglot.executor.env.reverse_key.__init__": {"fullname": "sqlglot.executor.env.reverse_key.__init__", "modulename": "sqlglot.executor.env", "qualname": "reverse_key.__init__", "kind": "function", "doc": "

\n", "signature": "(obj)"}, "sqlglot.executor.env.filter_nulls": {"fullname": "sqlglot.executor.env.filter_nulls", "modulename": "sqlglot.executor.env", "qualname": "filter_nulls", "kind": "function", "doc": "

\n", "signature": "(func, empty_null=True):", "funcdef": "def"}, "sqlglot.executor.env.null_if_any": {"fullname": "sqlglot.executor.env.null_if_any", "modulename": "sqlglot.executor.env", "qualname": "null_if_any", "kind": "function", "doc": "

Decorator that makes a function return None if any of the required arguments are None.

\n\n

This also supports decoration with no arguments, e.g.:

\n\n
@null_if_any\ndef foo(a, b): ...\n
\n\n

In which case all arguments are required.

\n", "signature": "(*required):", "funcdef": "def"}, "sqlglot.executor.env.str_position": {"fullname": "sqlglot.executor.env.str_position", "modulename": "sqlglot.executor.env", "qualname": "str_position", "kind": "function", "doc": "

\n", "signature": "(substr, this, position=None):", "funcdef": "def"}, "sqlglot.executor.env.substring": {"fullname": "sqlglot.executor.env.substring", "modulename": "sqlglot.executor.env", "qualname": "substring", "kind": "function", "doc": "

\n", "signature": "(this, start=None, length=None):", "funcdef": "def"}, "sqlglot.executor.env.cast": {"fullname": "sqlglot.executor.env.cast", "modulename": "sqlglot.executor.env", "qualname": "cast", "kind": "function", "doc": "

\n", "signature": "(this, to):", "funcdef": "def"}, "sqlglot.executor.env.ordered": {"fullname": "sqlglot.executor.env.ordered", "modulename": "sqlglot.executor.env", "qualname": "ordered", "kind": "function", "doc": "

\n", "signature": "(this, desc, nulls_first):", "funcdef": "def"}, "sqlglot.executor.env.interval": {"fullname": "sqlglot.executor.env.interval", "modulename": "sqlglot.executor.env", "qualname": "interval", "kind": "function", "doc": "

\n", "signature": "(this, unit):", "funcdef": "def"}, "sqlglot.executor.python": {"fullname": "sqlglot.executor.python", "modulename": "sqlglot.executor.python", "kind": "module", "doc": "

\n"}, "sqlglot.executor.python.PythonExecutor": {"fullname": "sqlglot.executor.python.PythonExecutor", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor", "kind": "class", "doc": "

\n"}, "sqlglot.executor.python.PythonExecutor.__init__": {"fullname": "sqlglot.executor.python.PythonExecutor.__init__", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.__init__", "kind": "function", "doc": "

\n", "signature": "(env=None, tables=None)"}, "sqlglot.executor.python.PythonExecutor.execute": {"fullname": "sqlglot.executor.python.PythonExecutor.execute", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.execute", "kind": "function", "doc": "

\n", "signature": "(self, plan):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.generate": {"fullname": "sqlglot.executor.python.PythonExecutor.generate", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.generate", "kind": "function", "doc": "

Convert a SQL expression into literal Python code and compile it into bytecode.

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"fullname": "sqlglot.executor.python.PythonExecutor.generate_tuple", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.generate_tuple", "kind": "function", "doc": "

Convert an array of SQL expressions into tuple of Python byte code.

\n", "signature": "(self, expressions):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.context": {"fullname": "sqlglot.executor.python.PythonExecutor.context", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.context", "kind": "function", "doc": "

\n", "signature": "(self, tables):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.table": {"fullname": "sqlglot.executor.python.PythonExecutor.table", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.table", "kind": "function", "doc": "

\n", "signature": "(self, expressions):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.scan": {"fullname": "sqlglot.executor.python.PythonExecutor.scan", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.scan", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.static": {"fullname": "sqlglot.executor.python.PythonExecutor.static", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.static", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.scan_table": {"fullname": "sqlglot.executor.python.PythonExecutor.scan_table", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.scan_table", "kind": "function", "doc": "

\n", "signature": "(self, step):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"fullname": "sqlglot.executor.python.PythonExecutor.scan_csv", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.scan_csv", "kind": "function", "doc": "

\n", "signature": "(self, step):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.join": {"fullname": "sqlglot.executor.python.PythonExecutor.join", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.join", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"fullname": "sqlglot.executor.python.PythonExecutor.nested_loop_join", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.nested_loop_join", "kind": "function", "doc": "

\n", "signature": "(self, _join, source_context, join_context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.hash_join": {"fullname": "sqlglot.executor.python.PythonExecutor.hash_join", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.hash_join", "kind": "function", "doc": "

\n", "signature": "(self, join, source_context, join_context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.aggregate": {"fullname": "sqlglot.executor.python.PythonExecutor.aggregate", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.aggregate", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.sort": {"fullname": "sqlglot.executor.python.PythonExecutor.sort", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.sort", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.set_operation": {"fullname": "sqlglot.executor.python.PythonExecutor.set_operation", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.set_operation", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.Python": {"fullname": "sqlglot.executor.python.Python", "modulename": "sqlglot.executor.python", "qualname": "Python", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.executor.python.Python.Tokenizer": {"fullname": "sqlglot.executor.python.Python.Tokenizer", "modulename": "sqlglot.executor.python", "qualname": "Python.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.executor.python.Python.Generator": {"fullname": "sqlglot.executor.python.Python.Generator", "modulename": "sqlglot.executor.python", "qualname": "Python.Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.executor.table": {"fullname": "sqlglot.executor.table", "modulename": "sqlglot.executor.table", "kind": "module", "doc": "

\n"}, "sqlglot.executor.table.Table": {"fullname": "sqlglot.executor.table.Table", "modulename": "sqlglot.executor.table", "qualname": "Table", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.Table.__init__": {"fullname": "sqlglot.executor.table.Table.__init__", "modulename": "sqlglot.executor.table", "qualname": "Table.__init__", "kind": "function", "doc": "

\n", "signature": "(columns, rows=None, column_range=None)"}, "sqlglot.executor.table.Table.add_columns": {"fullname": "sqlglot.executor.table.Table.add_columns", "modulename": "sqlglot.executor.table", "qualname": "Table.add_columns", "kind": "function", "doc": "

\n", "signature": "(self, *columns: str) -> None:", "funcdef": "def"}, "sqlglot.executor.table.Table.append": {"fullname": "sqlglot.executor.table.Table.append", "modulename": "sqlglot.executor.table", "qualname": "Table.append", "kind": "function", "doc": "

\n", "signature": "(self, row):", "funcdef": "def"}, "sqlglot.executor.table.Table.pop": {"fullname": "sqlglot.executor.table.Table.pop", "modulename": "sqlglot.executor.table", "qualname": "Table.pop", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.executor.table.TableIter": {"fullname": "sqlglot.executor.table.TableIter", "modulename": "sqlglot.executor.table", "qualname": "TableIter", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.TableIter.__init__": {"fullname": "sqlglot.executor.table.TableIter.__init__", "modulename": "sqlglot.executor.table", "qualname": "TableIter.__init__", "kind": "function", "doc": "

\n", "signature": "(table)"}, "sqlglot.executor.table.RangeReader": {"fullname": "sqlglot.executor.table.RangeReader", "modulename": "sqlglot.executor.table", "qualname": "RangeReader", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.RangeReader.__init__": {"fullname": "sqlglot.executor.table.RangeReader.__init__", "modulename": "sqlglot.executor.table", "qualname": "RangeReader.__init__", "kind": "function", "doc": "

\n", "signature": "(table)"}, "sqlglot.executor.table.RowReader": {"fullname": "sqlglot.executor.table.RowReader", "modulename": "sqlglot.executor.table", "qualname": "RowReader", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.RowReader.__init__": {"fullname": "sqlglot.executor.table.RowReader.__init__", "modulename": "sqlglot.executor.table", "qualname": "RowReader.__init__", "kind": "function", "doc": "

\n", "signature": "(columns, column_range=None)"}, "sqlglot.executor.table.Tables": {"fullname": "sqlglot.executor.table.Tables", "modulename": "sqlglot.executor.table", "qualname": "Tables", "kind": "class", "doc": "

Abstract base class for generic types.

\n\n

A generic type is typically declared by inheriting from\nthis class parameterized with one or more type variables.\nFor example, a generic mapping type might be defined as::

\n\n

class Mapping(Generic[KT, VT]):\n def __getitem__(self, key: KT) -> VT:\n ...\n # Etc.

\n\n

This class can then be used as follows::

\n\n

def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:\n try:\n return mapping[key]\n except KeyError:\n return default

\n", "bases": "sqlglot.schema.AbstractMappingSchema[sqlglot.executor.table.Table]"}, "sqlglot.executor.table.ensure_tables": {"fullname": "sqlglot.executor.table.ensure_tables", "modulename": "sqlglot.executor.table", "qualname": "ensure_tables", "kind": "function", "doc": "

\n", "signature": "(d: Optional[Dict]) -> sqlglot.executor.table.Tables:", "funcdef": "def"}, "sqlglot.expressions": {"fullname": "sqlglot.expressions", "modulename": "sqlglot.expressions", "kind": "module", "doc": "

Expressions

\n\n

Every AST node in SQLGlot is represented by a subclass of Expression.

\n\n

This module contains the implementation of all supported Expression types. Additionally,\nit exposes a number of helper functions, which are mainly used to programmatically build\nSQL expressions, such as sqlglot.expressions.select.

\n\n
\n"}, "sqlglot.expressions.Expression": {"fullname": "sqlglot.expressions.Expression", "modulename": "sqlglot.expressions", "qualname": "Expression", "kind": "class", "doc": "

The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary\ncontext, such as its child expressions, their names (arg keys), and whether a given child expression\nis optional or not.

\n\n
Attributes:
\n\n
    \n
  • key: a unique key for each class in the Expression hierarchy. This is useful for hashing\nand representing expressions as strings.
  • \n
  • arg_types: determines what arguments (child nodes) are supported by an expression. It\nmaps arg keys to booleans that indicate whether the corresponding args are optional.
  • \n
  • parent: a reference to the parent expression (or None, in case of root expressions).
  • \n
  • arg_key: the arg key an expression is associated with, i.e. the name its parent expression\nuses to refer to it.
  • \n
  • comments: a list of comments that are associated with a given expression. This is used in\norder to preserve comments when transpiling SQL code.
  • \n
  • _type: the sqlglot.expressions.DataType type of an expression. This is inferred by the\noptimizer, in order to enable some transformations that require type information.
  • \n
\n\n
Example:
\n\n
\n
\n
>>> class Foo(Expression):\n...     arg_types = {"this": True, "expression": False}\n
\n
\n \n

The above definition informs us that Foo is an Expression that requires an argument called\n \"this\" and may also optionally receive an argument called \"expression\".

\n
\n\n
Arguments:
\n\n
    \n
  • args: a mapping used for retrieving the arguments of an expression, given their arg keys.
  • \n
\n"}, "sqlglot.expressions.Expression.__init__": {"fullname": "sqlglot.expressions.Expression.__init__", "modulename": "sqlglot.expressions", "qualname": "Expression.__init__", "kind": "function", "doc": "

\n", "signature": "(**args: Any)"}, "sqlglot.expressions.Expression.this": {"fullname": "sqlglot.expressions.Expression.this", "modulename": "sqlglot.expressions", "qualname": "Expression.this", "kind": "variable", "doc": "

Retrieves the argument with key \"this\".

\n"}, "sqlglot.expressions.Expression.expression": {"fullname": "sqlglot.expressions.Expression.expression", "modulename": "sqlglot.expressions", "qualname": "Expression.expression", "kind": "variable", "doc": "

Retrieves the argument with key \"expression\".

\n"}, "sqlglot.expressions.Expression.expressions": {"fullname": "sqlglot.expressions.Expression.expressions", "modulename": "sqlglot.expressions", "qualname": "Expression.expressions", "kind": "variable", "doc": "

Retrieves the argument with key \"expressions\".

\n"}, "sqlglot.expressions.Expression.text": {"fullname": "sqlglot.expressions.Expression.text", "modulename": "sqlglot.expressions", "qualname": "Expression.text", "kind": "function", "doc": "

Returns a textual representation of the argument corresponding to \"key\". This can only be used\nfor args that are strings or leaf Expression instances, such as identifiers and literals.

\n", "signature": "(self, key) -> str:", "funcdef": "def"}, "sqlglot.expressions.Expression.is_string": {"fullname": "sqlglot.expressions.Expression.is_string", "modulename": "sqlglot.expressions", "qualname": "Expression.is_string", "kind": "variable", "doc": "

Checks whether a Literal expression is a string.

\n", "annotation": ": bool"}, "sqlglot.expressions.Expression.is_number": {"fullname": "sqlglot.expressions.Expression.is_number", "modulename": "sqlglot.expressions", "qualname": "Expression.is_number", "kind": "variable", "doc": "

Checks whether a Literal expression is a number.

\n", "annotation": ": bool"}, "sqlglot.expressions.Expression.is_int": {"fullname": "sqlglot.expressions.Expression.is_int", "modulename": "sqlglot.expressions", "qualname": "Expression.is_int", "kind": "variable", "doc": "

Checks whether a Literal expression is an integer.

\n", "annotation": ": bool"}, "sqlglot.expressions.Expression.is_star": {"fullname": "sqlglot.expressions.Expression.is_star", "modulename": "sqlglot.expressions", "qualname": "Expression.is_star", "kind": "variable", "doc": "

Checks whether an expression is a star.

\n", "annotation": ": bool"}, "sqlglot.expressions.Expression.alias": {"fullname": "sqlglot.expressions.Expression.alias", "modulename": "sqlglot.expressions", "qualname": "Expression.alias", "kind": "variable", "doc": "

Returns the alias of the expression, or an empty string if it's not aliased.

\n", "annotation": ": str"}, "sqlglot.expressions.Expression.output_name": {"fullname": "sqlglot.expressions.Expression.output_name", "modulename": "sqlglot.expressions", "qualname": "Expression.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Expression.copy": {"fullname": "sqlglot.expressions.Expression.copy", "modulename": "sqlglot.expressions", "qualname": "Expression.copy", "kind": "function", "doc": "

Returns a deep copy of the expression.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.add_comments": {"fullname": "sqlglot.expressions.Expression.add_comments", "modulename": "sqlglot.expressions", "qualname": "Expression.add_comments", "kind": "function", "doc": "

\n", "signature": "(self, comments: Optional[List[str]]) -> None:", "funcdef": "def"}, "sqlglot.expressions.Expression.append": {"fullname": "sqlglot.expressions.Expression.append", "modulename": "sqlglot.expressions", "qualname": "Expression.append", "kind": "function", "doc": "

Appends value to arg_key if it's a list, otherwise sets it as a new list.

\n\n
Arguments:
\n\n
    \n
  • arg_key (str): name of the list expression arg
  • \n
  • value (Any): value to append to the list
  • \n
\n", "signature": "(self, arg_key: str, value: Any) -> None:", "funcdef": "def"}, "sqlglot.expressions.Expression.set": {"fullname": "sqlglot.expressions.Expression.set", "modulename": "sqlglot.expressions", "qualname": "Expression.set", "kind": "function", "doc": "

Sets arg_key to value.

\n\n
Arguments:
\n\n
    \n
  • arg_key (str): name of the expression arg.
  • \n
  • value: value to set the arg to.
  • \n
\n", "signature": "(self, arg_key: str, value: Any) -> None:", "funcdef": "def"}, "sqlglot.expressions.Expression.depth": {"fullname": "sqlglot.expressions.Expression.depth", "modulename": "sqlglot.expressions", "qualname": "Expression.depth", "kind": "variable", "doc": "

Returns the depth of this tree.

\n", "annotation": ": int"}, "sqlglot.expressions.Expression.iter_expressions": {"fullname": "sqlglot.expressions.Expression.iter_expressions", "modulename": "sqlglot.expressions", "qualname": "Expression.iter_expressions", "kind": "function", "doc": "

Yields the key and expression for all arguments, exploding list args.

\n", "signature": "(self) -> Iterator[Tuple[str, sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.expressions.Expression.find": {"fullname": "sqlglot.expressions.Expression.find", "modulename": "sqlglot.expressions", "qualname": "Expression.find", "kind": "function", "doc": "

Returns the first node in this tree which matches at least one of\nthe specified types.

\n\n
Arguments:
\n\n
    \n
  • expression_types: the expression type(s) to match.
  • \n
  • bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
  • \n
\n\n
Returns:
\n\n
\n

The node which matches the criteria or None if no such node was found.

\n
\n", "signature": "(self, *expression_types: Type[~E], bfs: bool = True) -> Optional[~E]:", "funcdef": "def"}, "sqlglot.expressions.Expression.find_all": {"fullname": "sqlglot.expressions.Expression.find_all", "modulename": "sqlglot.expressions", "qualname": "Expression.find_all", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree and only\nyields those that match at least one of the specified expression types.

\n\n
Arguments:
\n\n
    \n
  • expression_types: the expression type(s) to match.
  • \n
  • bfs: whether to search the AST using the BFS algorithm (DFS is used if false).
  • \n
\n\n
Returns:
\n\n
\n

The generator object.

\n
\n", "signature": "(self, *expression_types: Type[~E], bfs: bool = True) -> Iterator[~E]:", "funcdef": "def"}, "sqlglot.expressions.Expression.find_ancestor": {"fullname": "sqlglot.expressions.Expression.find_ancestor", "modulename": "sqlglot.expressions", "qualname": "Expression.find_ancestor", "kind": "function", "doc": "

Returns the nearest parent matching expression_types.

\n\n
Arguments:
\n\n
    \n
  • expression_types: the expression type(s) to match.
  • \n
\n\n
Returns:
\n\n
\n

The parent node.

\n
\n", "signature": "(self, *expression_types: Type[~E]) -> Optional[~E]:", "funcdef": "def"}, "sqlglot.expressions.Expression.parent_select": {"fullname": "sqlglot.expressions.Expression.parent_select", "modulename": "sqlglot.expressions", "qualname": "Expression.parent_select", "kind": "variable", "doc": "

Returns the parent select statement.

\n", "annotation": ": Optional[sqlglot.expressions.Select]"}, "sqlglot.expressions.Expression.same_parent": {"fullname": "sqlglot.expressions.Expression.same_parent", "modulename": "sqlglot.expressions", "qualname": "Expression.same_parent", "kind": "variable", "doc": "

Returns whether the parent is of the same class as this expression.

\n", "annotation": ": bool"}, "sqlglot.expressions.Expression.root": {"fullname": "sqlglot.expressions.Expression.root", "modulename": "sqlglot.expressions", "qualname": "Expression.root", "kind": "function", "doc": "

Returns the root expression of this tree.

\n", "signature": "(self) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.Expression.walk": {"fullname": "sqlglot.expressions.Expression.walk", "modulename": "sqlglot.expressions", "qualname": "Expression.walk", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree.

\n\n
Arguments:
\n\n
    \n
  • bfs (bool): if set to True the BFS traversal order will be applied,\notherwise the DFS traversal will be used instead.
  • \n
  • prune ((node, parent, arg_key) -> bool): callable that returns True if\nthe generator should stop traversing this branch of the tree.
  • \n
\n\n
Returns:
\n\n
\n

The generator object.

\n
\n", "signature": "(self, bfs=True, prune=None):", "funcdef": "def"}, "sqlglot.expressions.Expression.dfs": {"fullname": "sqlglot.expressions.Expression.dfs", "modulename": "sqlglot.expressions", "qualname": "Expression.dfs", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree in\nthe DFS (Depth-first) order.

\n\n
Returns:
\n\n
\n

The generator object.

\n
\n", "signature": "(self, parent=None, key=None, prune=None):", "funcdef": "def"}, "sqlglot.expressions.Expression.bfs": {"fullname": "sqlglot.expressions.Expression.bfs", "modulename": "sqlglot.expressions", "qualname": "Expression.bfs", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree in\nthe BFS (Breadth-first) order.

\n\n
Returns:
\n\n
\n

The generator object.

\n
\n", "signature": "(self, prune=None):", "funcdef": "def"}, "sqlglot.expressions.Expression.unnest": {"fullname": "sqlglot.expressions.Expression.unnest", "modulename": "sqlglot.expressions", "qualname": "Expression.unnest", "kind": "function", "doc": "

Returns the first non-parenthesis child, or self.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.unalias": {"fullname": "sqlglot.expressions.Expression.unalias", "modulename": "sqlglot.expressions", "qualname": "Expression.unalias", "kind": "function", "doc": "

Returns the inner expression if this is an Alias.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.unnest_operands": {"fullname": "sqlglot.expressions.Expression.unnest_operands", "modulename": "sqlglot.expressions", "qualname": "Expression.unnest_operands", "kind": "function", "doc": "

Returns unnested operands as a tuple.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.flatten": {"fullname": "sqlglot.expressions.Expression.flatten", "modulename": "sqlglot.expressions", "qualname": "Expression.flatten", "kind": "function", "doc": "

Returns a generator which yields child nodes whose parents are the same class.

\n\n

A AND B AND C -> [A, B, C]

\n", "signature": "(self, unnest=True):", "funcdef": "def"}, "sqlglot.expressions.Expression.sql": {"fullname": "sqlglot.expressions.Expression.sql", "modulename": "sqlglot.expressions", "qualname": "Expression.sql", "kind": "function", "doc": "

Returns SQL string representation of this tree.

\n\n
Arguments:
\n\n
    \n
  • dialect: the dialect of the output SQL string (e.g. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • opts: other sqlglot.generator.Generator options.
  • \n
\n\n
Returns:
\n\n
\n

The SQL string.

\n
\n", "signature": "(\tself,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> str:", "funcdef": "def"}, "sqlglot.expressions.Expression.transform": {"fullname": "sqlglot.expressions.Expression.transform", "modulename": "sqlglot.expressions", "qualname": "Expression.transform", "kind": "function", "doc": "

Recursively visits all tree nodes (excluding already transformed ones)\nand applies the given transformation function to each node.

\n\n
Arguments:
\n\n
    \n
  • fun (function): a function which takes a node as an argument and returns a\nnew transformed node or the same node without modifications. If the function\nreturns None, then the corresponding node will be removed from the syntax tree.
  • \n
  • copy (bool): if set to True a new tree instance is constructed, otherwise the tree is\nmodified in place.
  • \n
\n\n
Returns:
\n\n
\n

The transformed tree.

\n
\n", "signature": "(self, fun, *args, copy=True, **kwargs):", "funcdef": "def"}, "sqlglot.expressions.Expression.replace": {"fullname": "sqlglot.expressions.Expression.replace", "modulename": "sqlglot.expressions", "qualname": "Expression.replace", "kind": "function", "doc": "

Swap out this expression with a new expression.

\n\n

For example:

\n\n
>>> tree = Select().select(\"x\").from_(\"tbl\")\n>>> tree.find(Column).replace(Column(this=\"y\"))\n(COLUMN this: y)\n>>> tree.sql()\n'SELECT y FROM tbl'\n
\n\n
Arguments:
\n\n
    \n
  • expression: new node
  • \n
\n\n
Returns:
\n\n
\n

The new expression or expressions.

\n
\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.expressions.Expression.pop": {"fullname": "sqlglot.expressions.Expression.pop", "modulename": "sqlglot.expressions", "qualname": "Expression.pop", "kind": "function", "doc": "

Remove this expression from its AST.

\n\n
Returns:
\n\n
\n

The popped expression.

\n
\n", "signature": "(self: ~E) -> ~E:", "funcdef": "def"}, "sqlglot.expressions.Expression.assert_is": {"fullname": "sqlglot.expressions.Expression.assert_is", "modulename": "sqlglot.expressions", "qualname": "Expression.assert_is", "kind": "function", "doc": "

Assert that this Expression is an instance of type_.

\n\n

If it is NOT an instance of type_, this raises an assertion error.\nOtherwise, this returns the expression itself.

\n\n
Examples:
\n\n
\n

This is useful for type safety in chained expressions:

\n \n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()\n'SELECT x, z FROM y'\n
\n
\n
\n", "signature": "(self, type_: Type[~E]) -> ~E:", "funcdef": "def"}, "sqlglot.expressions.Expression.error_messages": {"fullname": "sqlglot.expressions.Expression.error_messages", "modulename": "sqlglot.expressions", "qualname": "Expression.error_messages", "kind": "function", "doc": "

Checks if this expression is valid (e.g. all mandatory args are set).

\n\n
Arguments:
\n\n
    \n
  • args: a sequence of values that were used to instantiate a Func expression. This is used\nto check that the provided arguments don't exceed the function argument limit.
  • \n
\n\n
Returns:
\n\n
\n

A list of error messages for all possible errors that were found.

\n
\n", "signature": "(self, args: Optional[Sequence] = None) -> List[str]:", "funcdef": "def"}, "sqlglot.expressions.Expression.dump": {"fullname": "sqlglot.expressions.Expression.dump", "modulename": "sqlglot.expressions", "qualname": "Expression.dump", "kind": "function", "doc": "

Dump this Expression to a JSON-serializable dict.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.load": {"fullname": "sqlglot.expressions.Expression.load", "modulename": "sqlglot.expressions", "qualname": "Expression.load", "kind": "function", "doc": "

Load a dict (as returned by Expression.dump) into an Expression instance.

\n", "signature": "(cls, obj):", "funcdef": "def"}, "sqlglot.expressions.Condition": {"fullname": "sqlglot.expressions.Condition", "modulename": "sqlglot.expressions", "qualname": "Condition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Condition.and_": {"fullname": "sqlglot.expressions.Condition.and_", "modulename": "sqlglot.expressions", "qualname": "Condition.and_", "kind": "function", "doc": "

AND this condition with one or multiple expressions.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").and_("y=1").sql()\n'x = 1 AND y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: whether or not to copy the involved expressions (only applies to Expressions).
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new And condition.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.Condition.or_": {"fullname": "sqlglot.expressions.Condition.or_", "modulename": "sqlglot.expressions", "qualname": "Condition.or_", "kind": "function", "doc": "

OR this condition with one or multiple expressions.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").or_("y=1").sql()\n'x = 1 OR y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: whether or not to copy the involved expressions (only applies to Expressions).
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Or condition.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.Condition.not_": {"fullname": "sqlglot.expressions.Condition.not_", "modulename": "sqlglot.expressions", "qualname": "Condition.not_", "kind": "function", "doc": "

Wrap this condition with NOT.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").not_().sql()\n'NOT x = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • copy: whether or not to copy this object.
  • \n
\n\n
Returns:
\n\n
\n

The new Not instance.

\n
\n", "signature": "(self, copy: bool = True):", "funcdef": "def"}, "sqlglot.expressions.Condition.as_": {"fullname": "sqlglot.expressions.Condition.as_", "modulename": "sqlglot.expressions", "qualname": "Condition.as_", "kind": "function", "doc": "

\n", "signature": "(\tself,\talias: str | sqlglot.expressions.Identifier,\tquoted: Optional[bool] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Alias:", "funcdef": "def"}, "sqlglot.expressions.Condition.isin": {"fullname": "sqlglot.expressions.Condition.isin", "modulename": "sqlglot.expressions", "qualname": "Condition.isin", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*expressions: Any,\tquery: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.In:", "funcdef": "def"}, "sqlglot.expressions.Condition.between": {"fullname": "sqlglot.expressions.Condition.between", "modulename": "sqlglot.expressions", "qualname": "Condition.between", "kind": "function", "doc": "

\n", "signature": "(\tself,\tlow: Any,\thigh: Any,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Between:", "funcdef": "def"}, "sqlglot.expressions.Condition.is_": {"fullname": "sqlglot.expressions.Condition.is_", "modulename": "sqlglot.expressions", "qualname": "Condition.is_", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: Union[str, sqlglot.expressions.Expression]) -> sqlglot.expressions.Is:", "funcdef": "def"}, "sqlglot.expressions.Condition.like": {"fullname": "sqlglot.expressions.Condition.like", "modulename": "sqlglot.expressions", "qualname": "Condition.like", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: Union[str, sqlglot.expressions.Expression]) -> sqlglot.expressions.Like:", "funcdef": "def"}, "sqlglot.expressions.Condition.ilike": {"fullname": "sqlglot.expressions.Condition.ilike", "modulename": "sqlglot.expressions", "qualname": "Condition.ilike", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: Union[str, sqlglot.expressions.Expression]) -> sqlglot.expressions.ILike:", "funcdef": "def"}, "sqlglot.expressions.Condition.eq": {"fullname": "sqlglot.expressions.Condition.eq", "modulename": "sqlglot.expressions", "qualname": "Condition.eq", "kind": "function", "doc": "

\n", "signature": "(self, other: Any) -> sqlglot.expressions.EQ:", "funcdef": "def"}, "sqlglot.expressions.Condition.neq": {"fullname": "sqlglot.expressions.Condition.neq", "modulename": "sqlglot.expressions", "qualname": "Condition.neq", "kind": "function", "doc": "

\n", "signature": "(self, other: Any) -> sqlglot.expressions.NEQ:", "funcdef": "def"}, "sqlglot.expressions.Condition.rlike": {"fullname": "sqlglot.expressions.Condition.rlike", "modulename": "sqlglot.expressions", "qualname": "Condition.rlike", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: Union[str, sqlglot.expressions.Expression]) -> sqlglot.expressions.RegexpLike:", "funcdef": "def"}, "sqlglot.expressions.Predicate": {"fullname": "sqlglot.expressions.Predicate", "modulename": "sqlglot.expressions", "qualname": "Predicate", "kind": "class", "doc": "

Relationships like x = y, x > 1, x >= y.

\n", "bases": "Condition"}, "sqlglot.expressions.DerivedTable": {"fullname": "sqlglot.expressions.DerivedTable", "modulename": "sqlglot.expressions", "qualname": "DerivedTable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Unionable": {"fullname": "sqlglot.expressions.Unionable", "modulename": "sqlglot.expressions", "qualname": "Unionable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Unionable.union": {"fullname": "sqlglot.expressions.Unionable.union", "modulename": "sqlglot.expressions", "qualname": "Unionable.union", "kind": "function", "doc": "

Builds a UNION expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT * FROM foo").union("SELECT * FROM bla").sql()\n'SELECT * FROM foo UNION SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Union expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Unionable:", "funcdef": "def"}, "sqlglot.expressions.Unionable.intersect": {"fullname": "sqlglot.expressions.Unionable.intersect", "modulename": "sqlglot.expressions", "qualname": "Unionable.intersect", "kind": "function", "doc": "

Builds an INTERSECT expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT * FROM foo").intersect("SELECT * FROM bla").sql()\n'SELECT * FROM foo INTERSECT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Intersect expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Unionable:", "funcdef": "def"}, "sqlglot.expressions.Unionable.except_": {"fullname": "sqlglot.expressions.Unionable.except_", "modulename": "sqlglot.expressions", "qualname": "Unionable.except_", "kind": "function", "doc": "

Builds an EXCEPT expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT * FROM foo").except_("SELECT * FROM bla").sql()\n'SELECT * FROM foo EXCEPT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Except expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Unionable:", "funcdef": "def"}, "sqlglot.expressions.UDTF": {"fullname": "sqlglot.expressions.UDTF", "modulename": "sqlglot.expressions", "qualname": "UDTF", "kind": "class", "doc": "

\n", "bases": "DerivedTable, Unionable"}, "sqlglot.expressions.Cache": {"fullname": "sqlglot.expressions.Cache", "modulename": "sqlglot.expressions", "qualname": "Cache", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Uncache": {"fullname": "sqlglot.expressions.Uncache", "modulename": "sqlglot.expressions", "qualname": "Uncache", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Create": {"fullname": "sqlglot.expressions.Create", "modulename": "sqlglot.expressions", "qualname": "Create", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Clone": {"fullname": "sqlglot.expressions.Clone", "modulename": "sqlglot.expressions", "qualname": "Clone", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Describe": {"fullname": "sqlglot.expressions.Describe", "modulename": "sqlglot.expressions", "qualname": "Describe", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Pragma": {"fullname": "sqlglot.expressions.Pragma", "modulename": "sqlglot.expressions", "qualname": "Pragma", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Set": {"fullname": "sqlglot.expressions.Set", "modulename": "sqlglot.expressions", "qualname": "Set", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SetItem": {"fullname": "sqlglot.expressions.SetItem", "modulename": "sqlglot.expressions", "qualname": "SetItem", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Show": {"fullname": "sqlglot.expressions.Show", "modulename": "sqlglot.expressions", "qualname": "Show", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.UserDefinedFunction": {"fullname": "sqlglot.expressions.UserDefinedFunction", "modulename": "sqlglot.expressions", "qualname": "UserDefinedFunction", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.CharacterSet": {"fullname": "sqlglot.expressions.CharacterSet", "modulename": "sqlglot.expressions", "qualname": "CharacterSet", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.With": {"fullname": "sqlglot.expressions.With", "modulename": "sqlglot.expressions", "qualname": "With", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.WithinGroup": {"fullname": "sqlglot.expressions.WithinGroup", "modulename": "sqlglot.expressions", "qualname": "WithinGroup", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.CTE": {"fullname": "sqlglot.expressions.CTE", "modulename": "sqlglot.expressions", "qualname": "CTE", "kind": "class", "doc": "

\n", "bases": "DerivedTable"}, "sqlglot.expressions.TableAlias": {"fullname": "sqlglot.expressions.TableAlias", "modulename": "sqlglot.expressions", "qualname": "TableAlias", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.BitString": {"fullname": "sqlglot.expressions.BitString", "modulename": "sqlglot.expressions", "qualname": "BitString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.HexString": {"fullname": "sqlglot.expressions.HexString", "modulename": "sqlglot.expressions", "qualname": "HexString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.ByteString": {"fullname": "sqlglot.expressions.ByteString", "modulename": "sqlglot.expressions", "qualname": "ByteString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.RawString": {"fullname": "sqlglot.expressions.RawString", "modulename": "sqlglot.expressions", "qualname": "RawString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Column": {"fullname": "sqlglot.expressions.Column", "modulename": "sqlglot.expressions", "qualname": "Column", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Column.output_name": {"fullname": "sqlglot.expressions.Column.output_name", "modulename": "sqlglot.expressions", "qualname": "Column.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Column.parts": {"fullname": "sqlglot.expressions.Column.parts", "modulename": "sqlglot.expressions", "qualname": "Column.parts", "kind": "variable", "doc": "

Return the parts of a column in the order: catalog, db, table, name.

\n", "annotation": ": List[sqlglot.expressions.Identifier]"}, "sqlglot.expressions.Column.to_dot": {"fullname": "sqlglot.expressions.Column.to_dot", "modulename": "sqlglot.expressions", "qualname": "Column.to_dot", "kind": "function", "doc": "

Converts the column into a dot expression.

\n", "signature": "(self) -> sqlglot.expressions.Dot:", "funcdef": "def"}, "sqlglot.expressions.ColumnPosition": {"fullname": "sqlglot.expressions.ColumnPosition", "modulename": "sqlglot.expressions", "qualname": "ColumnPosition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ColumnDef": {"fullname": "sqlglot.expressions.ColumnDef", "modulename": "sqlglot.expressions", "qualname": "ColumnDef", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AlterColumn": {"fullname": "sqlglot.expressions.AlterColumn", "modulename": "sqlglot.expressions", "qualname": "AlterColumn", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.RenameTable": {"fullname": "sqlglot.expressions.RenameTable", "modulename": "sqlglot.expressions", "qualname": "RenameTable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SetTag": {"fullname": "sqlglot.expressions.SetTag", "modulename": "sqlglot.expressions", "qualname": "SetTag", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Comment": {"fullname": "sqlglot.expressions.Comment", "modulename": "sqlglot.expressions", "qualname": "Comment", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.MergeTreeTTLAction": {"fullname": "sqlglot.expressions.MergeTreeTTLAction", "modulename": "sqlglot.expressions", "qualname": "MergeTreeTTLAction", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.MergeTreeTTL": {"fullname": "sqlglot.expressions.MergeTreeTTL", "modulename": "sqlglot.expressions", "qualname": "MergeTreeTTL", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ColumnConstraint": {"fullname": "sqlglot.expressions.ColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "ColumnConstraint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ColumnConstraintKind": {"fullname": "sqlglot.expressions.ColumnConstraintKind", "modulename": "sqlglot.expressions", "qualname": "ColumnConstraintKind", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"fullname": "sqlglot.expressions.AutoIncrementColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "AutoIncrementColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"fullname": "sqlglot.expressions.CaseSpecificColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CaseSpecificColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CharacterSetColumnConstraint": {"fullname": "sqlglot.expressions.CharacterSetColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CharacterSetColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CheckColumnConstraint": {"fullname": "sqlglot.expressions.CheckColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CheckColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CollateColumnConstraint": {"fullname": "sqlglot.expressions.CollateColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CollateColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CommentColumnConstraint": {"fullname": "sqlglot.expressions.CommentColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CommentColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CompressColumnConstraint": {"fullname": "sqlglot.expressions.CompressColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CompressColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.DateFormatColumnConstraint": {"fullname": "sqlglot.expressions.DateFormatColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "DateFormatColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.DefaultColumnConstraint": {"fullname": "sqlglot.expressions.DefaultColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "DefaultColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.EncodeColumnConstraint": {"fullname": "sqlglot.expressions.EncodeColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "EncodeColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"fullname": "sqlglot.expressions.GeneratedAsIdentityColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "GeneratedAsIdentityColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.InlineLengthColumnConstraint": {"fullname": "sqlglot.expressions.InlineLengthColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "InlineLengthColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.NotNullColumnConstraint": {"fullname": "sqlglot.expressions.NotNullColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "NotNullColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.OnUpdateColumnConstraint": {"fullname": "sqlglot.expressions.OnUpdateColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "OnUpdateColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"fullname": "sqlglot.expressions.PrimaryKeyColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "PrimaryKeyColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.TitleColumnConstraint": {"fullname": "sqlglot.expressions.TitleColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "TitleColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.UniqueColumnConstraint": {"fullname": "sqlglot.expressions.UniqueColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "UniqueColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.UppercaseColumnConstraint": {"fullname": "sqlglot.expressions.UppercaseColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "UppercaseColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.PathColumnConstraint": {"fullname": "sqlglot.expressions.PathColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "PathColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.Constraint": {"fullname": "sqlglot.expressions.Constraint", "modulename": "sqlglot.expressions", "qualname": "Constraint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Delete": {"fullname": "sqlglot.expressions.Delete", "modulename": "sqlglot.expressions", "qualname": "Delete", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Delete.delete": {"fullname": "sqlglot.expressions.Delete.delete", "modulename": "sqlglot.expressions", "qualname": "Delete.delete", "kind": "function", "doc": "

Create a DELETE expression or replace the table on an existing DELETE expression.

\n\n
Example:
\n\n
\n
\n
>>> delete("tbl").sql()\n'DELETE FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • table: the table from which to delete.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Delete: the modified expression.

\n
\n", "signature": "(\tself,\ttable: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Delete:", "funcdef": "def"}, "sqlglot.expressions.Delete.where": {"fullname": "sqlglot.expressions.Delete.where", "modulename": "sqlglot.expressions", "qualname": "Delete.where", "kind": "function", "doc": "

Append to or set the WHERE expressions.

\n\n
Example:
\n\n
\n
\n
>>> delete("tbl").where("x = 'a' OR x < 'b'").sql()\n"DELETE FROM tbl WHERE x = 'a' OR x < 'b'"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.\nMultiple expressions are combined with an AND operator.
  • \n
  • append: if True, AND the new expressions to any existing expression.\nOtherwise, this resets the expression.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Delete: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Delete:", "funcdef": "def"}, "sqlglot.expressions.Delete.returning": {"fullname": "sqlglot.expressions.Delete.returning", "modulename": "sqlglot.expressions", "qualname": "Delete.returning", "kind": "function", "doc": "

Set the RETURNING expression. Not supported by all dialects.

\n\n
Example:
\n\n
\n
\n
>>> delete("tbl").returning("*", dialect="postgres").sql()\n'DELETE FROM tbl RETURNING *'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Delete: the modified expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Delete:", "funcdef": "def"}, "sqlglot.expressions.Drop": {"fullname": "sqlglot.expressions.Drop", "modulename": "sqlglot.expressions", "qualname": "Drop", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Filter": {"fullname": "sqlglot.expressions.Filter", "modulename": "sqlglot.expressions", "qualname": "Filter", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Check": {"fullname": "sqlglot.expressions.Check", "modulename": "sqlglot.expressions", "qualname": "Check", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Directory": {"fullname": "sqlglot.expressions.Directory", "modulename": "sqlglot.expressions", "qualname": "Directory", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ForeignKey": {"fullname": "sqlglot.expressions.ForeignKey", "modulename": "sqlglot.expressions", "qualname": "ForeignKey", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.PrimaryKey": {"fullname": "sqlglot.expressions.PrimaryKey", "modulename": "sqlglot.expressions", "qualname": "PrimaryKey", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Into": {"fullname": "sqlglot.expressions.Into", "modulename": "sqlglot.expressions", "qualname": "Into", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.From": {"fullname": "sqlglot.expressions.From", "modulename": "sqlglot.expressions", "qualname": "From", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Having": {"fullname": "sqlglot.expressions.Having", "modulename": "sqlglot.expressions", "qualname": "Having", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Hint": {"fullname": "sqlglot.expressions.Hint", "modulename": "sqlglot.expressions", "qualname": "Hint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.JoinHint": {"fullname": "sqlglot.expressions.JoinHint", "modulename": "sqlglot.expressions", "qualname": "JoinHint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Identifier": {"fullname": "sqlglot.expressions.Identifier", "modulename": "sqlglot.expressions", "qualname": "Identifier", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Identifier.output_name": {"fullname": "sqlglot.expressions.Identifier.output_name", "modulename": "sqlglot.expressions", "qualname": "Identifier.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Index": {"fullname": "sqlglot.expressions.Index", "modulename": "sqlglot.expressions", "qualname": "Index", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Insert": {"fullname": "sqlglot.expressions.Insert", "modulename": "sqlglot.expressions", "qualname": "Insert", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Insert.with_": {"fullname": "sqlglot.expressions.Insert.with_", "modulename": "sqlglot.expressions", "qualname": "Insert.with_", "kind": "function", "doc": "

Append to or set the common table expressions.

\n\n
Example:
\n\n
\n
\n
>>> insert("SELECT x FROM cte", "t").with_("cte", as_="SELECT * FROM tbl").sql()\n'WITH cte AS (SELECT * FROM tbl) INSERT INTO t SELECT x FROM cte'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • alias: the SQL code string to parse as the table name.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • as_: the SQL code string to parse as the table expression.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • recursive: set the RECURSIVE part of the expression. Defaults to False.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified expression.

\n
\n", "signature": "(\tself,\talias: Union[str, sqlglot.expressions.Expression],\tas_: Union[str, sqlglot.expressions.Expression],\trecursive: Optional[bool] = None,\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Insert:", "funcdef": "def"}, "sqlglot.expressions.OnConflict": {"fullname": "sqlglot.expressions.OnConflict", "modulename": "sqlglot.expressions", "qualname": "OnConflict", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Returning": {"fullname": "sqlglot.expressions.Returning", "modulename": "sqlglot.expressions", "qualname": "Returning", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Introducer": {"fullname": "sqlglot.expressions.Introducer", "modulename": "sqlglot.expressions", "qualname": "Introducer", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.National": {"fullname": "sqlglot.expressions.National", "modulename": "sqlglot.expressions", "qualname": "National", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.LoadData": {"fullname": "sqlglot.expressions.LoadData", "modulename": "sqlglot.expressions", "qualname": "LoadData", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Partition": {"fullname": "sqlglot.expressions.Partition", "modulename": "sqlglot.expressions", "qualname": "Partition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Fetch": {"fullname": "sqlglot.expressions.Fetch", "modulename": "sqlglot.expressions", "qualname": "Fetch", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Group": {"fullname": "sqlglot.expressions.Group", "modulename": "sqlglot.expressions", "qualname": "Group", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Lambda": {"fullname": "sqlglot.expressions.Lambda", "modulename": "sqlglot.expressions", "qualname": "Lambda", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Limit": {"fullname": "sqlglot.expressions.Limit", "modulename": "sqlglot.expressions", "qualname": "Limit", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Literal": {"fullname": "sqlglot.expressions.Literal", "modulename": "sqlglot.expressions", "qualname": "Literal", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Literal.number": {"fullname": "sqlglot.expressions.Literal.number", "modulename": "sqlglot.expressions", "qualname": "Literal.number", "kind": "function", "doc": "

\n", "signature": "(cls, number) -> sqlglot.expressions.Literal:", "funcdef": "def"}, "sqlglot.expressions.Literal.string": {"fullname": "sqlglot.expressions.Literal.string", "modulename": "sqlglot.expressions", "qualname": "Literal.string", "kind": "function", "doc": "

\n", "signature": "(cls, string) -> sqlglot.expressions.Literal:", "funcdef": "def"}, "sqlglot.expressions.Literal.output_name": {"fullname": "sqlglot.expressions.Literal.output_name", "modulename": "sqlglot.expressions", "qualname": "Literal.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Join": {"fullname": "sqlglot.expressions.Join", "modulename": "sqlglot.expressions", "qualname": "Join", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Join.on": {"fullname": "sqlglot.expressions.Join.on", "modulename": "sqlglot.expressions", "qualname": "Join.on", "kind": "function", "doc": "

Append to or set the ON expressions.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()\n'JOIN x ON y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.\nMultiple expressions are combined with an AND operator.
  • \n
  • append: if True, AND the new expressions to any existing expression.\nOtherwise, this resets the expression.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Join expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Join:", "funcdef": "def"}, "sqlglot.expressions.Join.using": {"fullname": "sqlglot.expressions.Join.using", "modulename": "sqlglot.expressions", "qualname": "Join.using", "kind": "function", "doc": "

Append to or set the USING expressions.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()\n'JOIN x USING (foo, bla)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append: if True, concatenate the new expressions to the existing \"using\" list.\nOtherwise, this resets the expression.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Join expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Join:", "funcdef": "def"}, "sqlglot.expressions.Lateral": {"fullname": "sqlglot.expressions.Lateral", "modulename": "sqlglot.expressions", "qualname": "Lateral", "kind": "class", "doc": "

\n", "bases": "UDTF"}, "sqlglot.expressions.MatchRecognize": {"fullname": "sqlglot.expressions.MatchRecognize", "modulename": "sqlglot.expressions", "qualname": "MatchRecognize", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Final": {"fullname": "sqlglot.expressions.Final", "modulename": "sqlglot.expressions", "qualname": "Final", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Offset": {"fullname": "sqlglot.expressions.Offset", "modulename": "sqlglot.expressions", "qualname": "Offset", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Order": {"fullname": "sqlglot.expressions.Order", "modulename": "sqlglot.expressions", "qualname": "Order", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Cluster": {"fullname": "sqlglot.expressions.Cluster", "modulename": "sqlglot.expressions", "qualname": "Cluster", "kind": "class", "doc": "

\n", "bases": "Order"}, "sqlglot.expressions.Distribute": {"fullname": "sqlglot.expressions.Distribute", "modulename": "sqlglot.expressions", "qualname": "Distribute", "kind": "class", "doc": "

\n", "bases": "Order"}, "sqlglot.expressions.Sort": {"fullname": "sqlglot.expressions.Sort", "modulename": "sqlglot.expressions", "qualname": "Sort", "kind": "class", "doc": "

\n", "bases": "Order"}, "sqlglot.expressions.Ordered": {"fullname": "sqlglot.expressions.Ordered", "modulename": "sqlglot.expressions", "qualname": "Ordered", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Property": {"fullname": "sqlglot.expressions.Property", "modulename": "sqlglot.expressions", "qualname": "Property", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AlgorithmProperty": {"fullname": "sqlglot.expressions.AlgorithmProperty", "modulename": "sqlglot.expressions", "qualname": "AlgorithmProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.AutoIncrementProperty": {"fullname": "sqlglot.expressions.AutoIncrementProperty", "modulename": "sqlglot.expressions", "qualname": "AutoIncrementProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.BlockCompressionProperty": {"fullname": "sqlglot.expressions.BlockCompressionProperty", "modulename": "sqlglot.expressions", "qualname": "BlockCompressionProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.CharacterSetProperty": {"fullname": "sqlglot.expressions.CharacterSetProperty", "modulename": "sqlglot.expressions", "qualname": "CharacterSetProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ChecksumProperty": {"fullname": "sqlglot.expressions.ChecksumProperty", "modulename": "sqlglot.expressions", "qualname": "ChecksumProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.CollateProperty": {"fullname": "sqlglot.expressions.CollateProperty", "modulename": "sqlglot.expressions", "qualname": "CollateProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DataBlocksizeProperty": {"fullname": "sqlglot.expressions.DataBlocksizeProperty", "modulename": "sqlglot.expressions", "qualname": "DataBlocksizeProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DefinerProperty": {"fullname": "sqlglot.expressions.DefinerProperty", "modulename": "sqlglot.expressions", "qualname": "DefinerProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DistKeyProperty": {"fullname": "sqlglot.expressions.DistKeyProperty", "modulename": "sqlglot.expressions", "qualname": "DistKeyProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DistStyleProperty": {"fullname": "sqlglot.expressions.DistStyleProperty", "modulename": "sqlglot.expressions", "qualname": "DistStyleProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.EngineProperty": {"fullname": "sqlglot.expressions.EngineProperty", "modulename": "sqlglot.expressions", "qualname": "EngineProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ToTableProperty": {"fullname": "sqlglot.expressions.ToTableProperty", "modulename": "sqlglot.expressions", "qualname": "ToTableProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ExecuteAsProperty": {"fullname": "sqlglot.expressions.ExecuteAsProperty", "modulename": "sqlglot.expressions", "qualname": "ExecuteAsProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ExternalProperty": {"fullname": "sqlglot.expressions.ExternalProperty", "modulename": "sqlglot.expressions", "qualname": "ExternalProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.FallbackProperty": {"fullname": "sqlglot.expressions.FallbackProperty", "modulename": "sqlglot.expressions", "qualname": "FallbackProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.FileFormatProperty": {"fullname": "sqlglot.expressions.FileFormatProperty", "modulename": "sqlglot.expressions", "qualname": "FileFormatProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.FreespaceProperty": {"fullname": "sqlglot.expressions.FreespaceProperty", "modulename": "sqlglot.expressions", "qualname": "FreespaceProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.InputOutputFormat": {"fullname": "sqlglot.expressions.InputOutputFormat", "modulename": "sqlglot.expressions", "qualname": "InputOutputFormat", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.IsolatedLoadingProperty": {"fullname": "sqlglot.expressions.IsolatedLoadingProperty", "modulename": "sqlglot.expressions", "qualname": "IsolatedLoadingProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.JournalProperty": {"fullname": "sqlglot.expressions.JournalProperty", "modulename": "sqlglot.expressions", "qualname": "JournalProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LanguageProperty": {"fullname": "sqlglot.expressions.LanguageProperty", "modulename": "sqlglot.expressions", "qualname": "LanguageProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DictProperty": {"fullname": "sqlglot.expressions.DictProperty", "modulename": "sqlglot.expressions", "qualname": "DictProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DictSubProperty": {"fullname": "sqlglot.expressions.DictSubProperty", "modulename": "sqlglot.expressions", "qualname": "DictSubProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DictRange": {"fullname": "sqlglot.expressions.DictRange", "modulename": "sqlglot.expressions", "qualname": "DictRange", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.OnCluster": {"fullname": "sqlglot.expressions.OnCluster", "modulename": "sqlglot.expressions", "qualname": "OnCluster", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LikeProperty": {"fullname": "sqlglot.expressions.LikeProperty", "modulename": "sqlglot.expressions", "qualname": "LikeProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LocationProperty": {"fullname": "sqlglot.expressions.LocationProperty", "modulename": "sqlglot.expressions", "qualname": "LocationProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LockingProperty": {"fullname": "sqlglot.expressions.LockingProperty", "modulename": "sqlglot.expressions", "qualname": "LockingProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LogProperty": {"fullname": "sqlglot.expressions.LogProperty", "modulename": "sqlglot.expressions", "qualname": "LogProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.MaterializedProperty": {"fullname": "sqlglot.expressions.MaterializedProperty", "modulename": "sqlglot.expressions", "qualname": "MaterializedProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.MergeBlockRatioProperty": {"fullname": "sqlglot.expressions.MergeBlockRatioProperty", "modulename": "sqlglot.expressions", "qualname": "MergeBlockRatioProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.NoPrimaryIndexProperty": {"fullname": "sqlglot.expressions.NoPrimaryIndexProperty", "modulename": "sqlglot.expressions", "qualname": "NoPrimaryIndexProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.OnCommitProperty": {"fullname": "sqlglot.expressions.OnCommitProperty", "modulename": "sqlglot.expressions", "qualname": "OnCommitProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.PartitionedByProperty": {"fullname": "sqlglot.expressions.PartitionedByProperty", "modulename": "sqlglot.expressions", "qualname": "PartitionedByProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ReturnsProperty": {"fullname": "sqlglot.expressions.ReturnsProperty", "modulename": "sqlglot.expressions", "qualname": "ReturnsProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.RowFormatProperty": {"fullname": "sqlglot.expressions.RowFormatProperty", "modulename": "sqlglot.expressions", "qualname": "RowFormatProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.RowFormatDelimitedProperty": {"fullname": "sqlglot.expressions.RowFormatDelimitedProperty", "modulename": "sqlglot.expressions", "qualname": "RowFormatDelimitedProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.RowFormatSerdeProperty": {"fullname": "sqlglot.expressions.RowFormatSerdeProperty", "modulename": "sqlglot.expressions", "qualname": "RowFormatSerdeProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SchemaCommentProperty": {"fullname": "sqlglot.expressions.SchemaCommentProperty", "modulename": "sqlglot.expressions", "qualname": "SchemaCommentProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SerdeProperties": {"fullname": "sqlglot.expressions.SerdeProperties", "modulename": "sqlglot.expressions", "qualname": "SerdeProperties", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SetProperty": {"fullname": "sqlglot.expressions.SetProperty", "modulename": "sqlglot.expressions", "qualname": "SetProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SettingsProperty": {"fullname": "sqlglot.expressions.SettingsProperty", "modulename": "sqlglot.expressions", "qualname": "SettingsProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SortKeyProperty": {"fullname": "sqlglot.expressions.SortKeyProperty", "modulename": "sqlglot.expressions", "qualname": "SortKeyProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SqlSecurityProperty": {"fullname": "sqlglot.expressions.SqlSecurityProperty", "modulename": "sqlglot.expressions", "qualname": "SqlSecurityProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.StabilityProperty": {"fullname": "sqlglot.expressions.StabilityProperty", "modulename": "sqlglot.expressions", "qualname": "StabilityProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.TemporaryProperty": {"fullname": "sqlglot.expressions.TemporaryProperty", "modulename": "sqlglot.expressions", "qualname": "TemporaryProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.TransientProperty": {"fullname": "sqlglot.expressions.TransientProperty", "modulename": "sqlglot.expressions", "qualname": "TransientProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.VolatileProperty": {"fullname": "sqlglot.expressions.VolatileProperty", "modulename": "sqlglot.expressions", "qualname": "VolatileProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.WithDataProperty": {"fullname": "sqlglot.expressions.WithDataProperty", "modulename": "sqlglot.expressions", "qualname": "WithDataProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.WithJournalTableProperty": {"fullname": "sqlglot.expressions.WithJournalTableProperty", "modulename": "sqlglot.expressions", "qualname": "WithJournalTableProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.Properties": {"fullname": "sqlglot.expressions.Properties", "modulename": "sqlglot.expressions", "qualname": "Properties", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Properties.Location": {"fullname": "sqlglot.expressions.Properties.Location", "modulename": "sqlglot.expressions", "qualname": "Properties.Location", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"fullname": "sqlglot.expressions.Properties.Location.POST_CREATE", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_CREATE", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_CREATE: 'POST_CREATE'>"}, "sqlglot.expressions.Properties.Location.POST_NAME": {"fullname": "sqlglot.expressions.Properties.Location.POST_NAME", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_NAME", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_NAME: 'POST_NAME'>"}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"fullname": "sqlglot.expressions.Properties.Location.POST_SCHEMA", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_SCHEMA", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_SCHEMA: 'POST_SCHEMA'>"}, "sqlglot.expressions.Properties.Location.POST_WITH": {"fullname": "sqlglot.expressions.Properties.Location.POST_WITH", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_WITH", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_WITH: 'POST_WITH'>"}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"fullname": "sqlglot.expressions.Properties.Location.POST_ALIAS", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_ALIAS", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_ALIAS: 'POST_ALIAS'>"}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"fullname": "sqlglot.expressions.Properties.Location.POST_EXPRESSION", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_EXPRESSION", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_EXPRESSION: 'POST_EXPRESSION'>"}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"fullname": "sqlglot.expressions.Properties.Location.POST_INDEX", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_INDEX", "kind": "variable", "doc": "

\n", "default_value": "<Location.POST_INDEX: 'POST_INDEX'>"}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"fullname": "sqlglot.expressions.Properties.Location.UNSUPPORTED", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.UNSUPPORTED", "kind": "variable", "doc": "

\n", "default_value": "<Location.UNSUPPORTED: 'UNSUPPORTED'>"}, "sqlglot.expressions.Properties.from_dict": {"fullname": "sqlglot.expressions.Properties.from_dict", "modulename": "sqlglot.expressions", "qualname": "Properties.from_dict", "kind": "function", "doc": "

\n", "signature": "(cls, properties_dict: Dict) -> sqlglot.expressions.Properties:", "funcdef": "def"}, "sqlglot.expressions.Qualify": {"fullname": "sqlglot.expressions.Qualify", "modulename": "sqlglot.expressions", "qualname": "Qualify", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Return": {"fullname": "sqlglot.expressions.Return", "modulename": "sqlglot.expressions", "qualname": "Return", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Reference": {"fullname": "sqlglot.expressions.Reference", "modulename": "sqlglot.expressions", "qualname": "Reference", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Tuple": {"fullname": "sqlglot.expressions.Tuple", "modulename": "sqlglot.expressions", "qualname": "Tuple", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Tuple.isin": {"fullname": "sqlglot.expressions.Tuple.isin", "modulename": "sqlglot.expressions", "qualname": "Tuple.isin", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*expressions: Any,\tquery: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.In:", "funcdef": "def"}, "sqlglot.expressions.Subqueryable": {"fullname": "sqlglot.expressions.Subqueryable", "modulename": "sqlglot.expressions", "qualname": "Subqueryable", "kind": "class", "doc": "

\n", "bases": "Unionable"}, "sqlglot.expressions.Subqueryable.subquery": {"fullname": "sqlglot.expressions.Subqueryable.subquery", "modulename": "sqlglot.expressions", "qualname": "Subqueryable.subquery", "kind": "function", "doc": "

Convert this expression to an aliased expression that can be used as a Subquery.

\n\n
Example:
\n\n
\n
\n
>>> subquery = Select().select("x").from_("tbl").subquery()\n>>> Select().select("x").from_(subquery).sql()\n'SELECT x FROM (SELECT x FROM tbl)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • alias (str | Identifier): an optional alias for the subquery
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
\n\n
Returns:
\n\n
\n

Alias: the subquery

\n
\n", "signature": "(\tself,\talias: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tcopy: bool = True) -> sqlglot.expressions.Subquery:", "funcdef": "def"}, "sqlglot.expressions.Subqueryable.limit": {"fullname": "sqlglot.expressions.Subqueryable.limit", "modulename": "sqlglot.expressions", "qualname": "Subqueryable.limit", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, int],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Subqueryable.with_": {"fullname": "sqlglot.expressions.Subqueryable.with_", "modulename": "sqlglot.expressions", "qualname": "Subqueryable.with_", "kind": "function", "doc": "

Append to or set the common table expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()\n'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • alias: the SQL code string to parse as the table name.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • as_: the SQL code string to parse as the table expression.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • recursive: set the RECURSIVE part of the expression. Defaults to False.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified expression.

\n
\n", "signature": "(\tself,\talias: Union[str, sqlglot.expressions.Expression],\tas_: Union[str, sqlglot.expressions.Expression],\trecursive: Optional[bool] = None,\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Subqueryable:", "funcdef": "def"}, "sqlglot.expressions.Table": {"fullname": "sqlglot.expressions.Table", "modulename": "sqlglot.expressions", "qualname": "Table", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Table.parts": {"fullname": "sqlglot.expressions.Table.parts", "modulename": "sqlglot.expressions", "qualname": "Table.parts", "kind": "variable", "doc": "

Return the parts of a table in order: catalog, db, table.

\n", "annotation": ": List[sqlglot.expressions.Identifier]"}, "sqlglot.expressions.SystemTime": {"fullname": "sqlglot.expressions.SystemTime", "modulename": "sqlglot.expressions", "qualname": "SystemTime", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Union": {"fullname": "sqlglot.expressions.Union", "modulename": "sqlglot.expressions", "qualname": "Union", "kind": "class", "doc": "

\n", "bases": "Subqueryable"}, "sqlglot.expressions.Union.limit": {"fullname": "sqlglot.expressions.Union.limit", "modulename": "sqlglot.expressions", "qualname": "Union.limit", "kind": "function", "doc": "

Set the LIMIT expression.

\n\n
Example:
\n\n
\n
\n
>>> select("1").union(select("1")).limit(1).sql()\n'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nThis can also be an integer.\nIf a Limit instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Limit.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The limited subqueryable.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, int],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Union.select": {"fullname": "sqlglot.expressions.Union.select", "modulename": "sqlglot.expressions", "qualname": "Union.select", "kind": "function", "doc": "

Append to or set the SELECT of the union recursively.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("select a from x union select a from y union select a from z").select("b").sql()\n'SELECT a, b FROM x UNION SELECT a, b FROM y UNION SELECT a, b FROM z'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Union: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Union:", "funcdef": "def"}, "sqlglot.expressions.Union.is_star": {"fullname": "sqlglot.expressions.Union.is_star", "modulename": "sqlglot.expressions", "qualname": "Union.is_star", "kind": "variable", "doc": "

Checks whether an expression is a star.

\n", "annotation": ": bool"}, "sqlglot.expressions.Except": {"fullname": "sqlglot.expressions.Except", "modulename": "sqlglot.expressions", "qualname": "Except", "kind": "class", "doc": "

\n", "bases": "Union"}, "sqlglot.expressions.Intersect": {"fullname": "sqlglot.expressions.Intersect", "modulename": "sqlglot.expressions", "qualname": "Intersect", "kind": "class", "doc": "

\n", "bases": "Union"}, "sqlglot.expressions.Unnest": {"fullname": "sqlglot.expressions.Unnest", "modulename": "sqlglot.expressions", "qualname": "Unnest", "kind": "class", "doc": "

\n", "bases": "UDTF"}, "sqlglot.expressions.Update": {"fullname": "sqlglot.expressions.Update", "modulename": "sqlglot.expressions", "qualname": "Update", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Values": {"fullname": "sqlglot.expressions.Values", "modulename": "sqlglot.expressions", "qualname": "Values", "kind": "class", "doc": "

\n", "bases": "UDTF"}, "sqlglot.expressions.Var": {"fullname": "sqlglot.expressions.Var", "modulename": "sqlglot.expressions", "qualname": "Var", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Schema": {"fullname": "sqlglot.expressions.Schema", "modulename": "sqlglot.expressions", "qualname": "Schema", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Lock": {"fullname": "sqlglot.expressions.Lock", "modulename": "sqlglot.expressions", "qualname": "Lock", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Select": {"fullname": "sqlglot.expressions.Select", "modulename": "sqlglot.expressions", "qualname": "Select", "kind": "class", "doc": "

\n", "bases": "Subqueryable"}, "sqlglot.expressions.Select.from_": {"fullname": "sqlglot.expressions.Select.from_", "modulename": "sqlglot.expressions", "qualname": "Select.from_", "kind": "function", "doc": "

Set the FROM expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").sql()\n'SELECT x FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf a From instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a From.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.group_by": {"fullname": "sqlglot.expressions.Select.group_by", "modulename": "sqlglot.expressions", "qualname": "Select.group_by", "kind": "function", "doc": "

Set the GROUP BY expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()\n'SELECT x, COUNT(1) FROM tbl GROUP BY x'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf a Group instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Group.\nIf nothing is passed in, then a GROUP BY is not applied to the expression.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this flattens all the Group expressions into a single expression.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.order_by": {"fullname": "sqlglot.expressions.Select.order_by", "modulename": "sqlglot.expressions", "qualname": "Select.order_by", "kind": "function", "doc": "

Set the ORDER BY expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").order_by("x DESC").sql()\n'SELECT x FROM tbl ORDER BY x DESC'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Order instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in an Order.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this flattens all the Order expressions into a single expression.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.sort_by": {"fullname": "sqlglot.expressions.Select.sort_by", "modulename": "sqlglot.expressions", "qualname": "Select.sort_by", "kind": "function", "doc": "

Set the SORT BY expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").sort_by("x DESC").sql(dialect="hive")\n'SELECT x FROM tbl SORT BY x DESC'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf a Sort instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Sort.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this flattens all the Sort expressions into a single expression.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.cluster_by": {"fullname": "sqlglot.expressions.Select.cluster_by", "modulename": "sqlglot.expressions", "qualname": "Select.cluster_by", "kind": "function", "doc": "

Set the CLUSTER BY expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").cluster_by("x DESC").sql(dialect="hive")\n'SELECT x FROM tbl CLUSTER BY x DESC'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf a Cluster instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Cluster.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this flattens all the Cluster expressions into a single expression.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.limit": {"fullname": "sqlglot.expressions.Select.limit", "modulename": "sqlglot.expressions", "qualname": "Select.limit", "kind": "function", "doc": "

Set the LIMIT expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").limit(10).sql()\n'SELECT x FROM tbl LIMIT 10'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nThis can also be an integer.\nIf a Limit instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Limit.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, int],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.offset": {"fullname": "sqlglot.expressions.Select.offset", "modulename": "sqlglot.expressions", "qualname": "Select.offset", "kind": "function", "doc": "

Set the OFFSET expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").offset(10).sql()\n'SELECT x FROM tbl OFFSET 10'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nThis can also be an integer.\nIf an Offset instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in an Offset.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, int],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.select": {"fullname": "sqlglot.expressions.Select.select", "modulename": "sqlglot.expressions", "qualname": "Select.select", "kind": "function", "doc": "

Append to or set the SELECT expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x", "y").sql()\n'SELECT x, y'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.lateral": {"fullname": "sqlglot.expressions.Select.lateral", "modulename": "sqlglot.expressions", "qualname": "Select.lateral", "kind": "function", "doc": "

Append to or set the LATERAL expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()\n'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.join": {"fullname": "sqlglot.expressions.Select.join", "modulename": "sqlglot.expressions", "qualname": "Select.join", "kind": "function", "doc": "

Append to or set the JOIN expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()\n'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'\n
\n
\n \n
\n
>>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()\n'SELECT 1 FROM a JOIN b USING (x, y, z)'\n
\n
\n \n

Use join_type to change the type of join:

\n \n
\n
>>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()\n'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • on: optionally specify the join \"on\" criteria as a SQL string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • using: optionally specify the join \"using\" criteria as a SQL string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append: if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • join_type: if set, alter the parsed join type.
  • \n
  • join_alias: an optional alias for the joined source.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression],\ton: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tusing: Union[str, sqlglot.expressions.Expression, List[Union[str, sqlglot.expressions.Expression]], NoneType] = None,\tappend: bool = True,\tjoin_type: Optional[str] = None,\tjoin_alias: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.where": {"fullname": "sqlglot.expressions.Select.where", "modulename": "sqlglot.expressions", "qualname": "Select.where", "kind": "function", "doc": "

Append to or set the WHERE expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()\n"SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.\nMultiple expressions are combined with an AND operator.
  • \n
  • append: if True, AND the new expressions to any existing expression.\nOtherwise, this resets the expression.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.having": {"fullname": "sqlglot.expressions.Select.having", "modulename": "sqlglot.expressions", "qualname": "Select.having", "kind": "function", "doc": "

Append to or set the HAVING expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()\n'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.\nMultiple expressions are combined with an AND operator.
  • \n
  • append: if True, AND the new expressions to any existing expression.\nOtherwise, this resets the expression.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The modified Select expression.

\n
\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.window": {"fullname": "sqlglot.expressions.Select.window", "modulename": "sqlglot.expressions", "qualname": "Select.window", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.qualify": {"fullname": "sqlglot.expressions.Select.qualify", "modulename": "sqlglot.expressions", "qualname": "Select.qualify", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tappend: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.distinct": {"fullname": "sqlglot.expressions.Select.distinct", "modulename": "sqlglot.expressions", "qualname": "Select.distinct", "kind": "function", "doc": "

Set the DISTINCT expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").distinct().sql()\n'SELECT DISTINCT x FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • ons: the expressions to distinct on
  • \n
  • distinct: whether the Select should be distinct
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*ons: Union[str, sqlglot.expressions.Expression, NoneType],\tdistinct: bool = True,\tcopy: bool = True) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.ctas": {"fullname": "sqlglot.expressions.Select.ctas", "modulename": "sqlglot.expressions", "qualname": "Select.ctas", "kind": "function", "doc": "

Convert this expression to a CREATE TABLE AS statement.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("*").from_("tbl").ctas("x").sql()\n'CREATE TABLE x AS SELECT * FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • table: the SQL code string to parse as the table name.\nIf another Expression instance is passed, it will be used as-is.
  • \n
  • properties: an optional mapping of table properties
  • \n
  • dialect: the dialect used to parse the input table.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
  • opts: other options to use to parse the input table.
  • \n
\n\n
Returns:
\n\n
\n

The new Create expression.

\n
\n", "signature": "(\tself,\ttable: Union[str, sqlglot.expressions.Expression],\tproperties: Optional[Dict] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Create:", "funcdef": "def"}, "sqlglot.expressions.Select.lock": {"fullname": "sqlglot.expressions.Select.lock", "modulename": "sqlglot.expressions", "qualname": "Select.lock", "kind": "function", "doc": "

Set the locking read mode for this expression.

\n\n
Examples:
\n\n
\n
\n
>>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")\n"SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"\n
\n
\n \n
\n
>>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")\n"SELECT x FROM tbl WHERE x = 'a' FOR SHARE"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • update: if True, the locking type will be FOR UPDATE, else it will be FOR SHARE.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
\n\n
Returns:
\n\n
\n

The modified expression.

\n
\n", "signature": "(\tself,\tupdate: bool = True,\tcopy: bool = True) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.hint": {"fullname": "sqlglot.expressions.Select.hint", "modulename": "sqlglot.expressions", "qualname": "Select.hint", "kind": "function", "doc": "

Set hints for this expression.

\n\n
Examples:
\n\n
\n
\n
>>> Select().select("x").from_("tbl").hint("BROADCAST(y)").sql(dialect="spark")\n'SELECT /*+ BROADCAST(y) */ x FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • hints: The SQL code strings to parse as the hints.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • dialect: The dialect used to parse the hints.
  • \n
  • copy: If False, modify this expression instance in-place.
  • \n
\n\n
Returns:
\n\n
\n

The modified expression.

\n
\n", "signature": "(\tself,\t*hints: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.is_star": {"fullname": "sqlglot.expressions.Select.is_star", "modulename": "sqlglot.expressions", "qualname": "Select.is_star", "kind": "variable", "doc": "

Checks whether an expression is a star.

\n", "annotation": ": bool"}, "sqlglot.expressions.Subquery": {"fullname": "sqlglot.expressions.Subquery", "modulename": "sqlglot.expressions", "qualname": "Subquery", "kind": "class", "doc": "

\n", "bases": "DerivedTable, Unionable"}, "sqlglot.expressions.Subquery.unnest": {"fullname": "sqlglot.expressions.Subquery.unnest", "modulename": "sqlglot.expressions", "qualname": "Subquery.unnest", "kind": "function", "doc": "

Returns the first non-subquery.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Subquery.is_star": {"fullname": "sqlglot.expressions.Subquery.is_star", "modulename": "sqlglot.expressions", "qualname": "Subquery.is_star", "kind": "variable", "doc": "

Checks whether an expression is a star.

\n", "annotation": ": bool"}, "sqlglot.expressions.Subquery.output_name": {"fullname": "sqlglot.expressions.Subquery.output_name", "modulename": "sqlglot.expressions", "qualname": "Subquery.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.TableSample": {"fullname": "sqlglot.expressions.TableSample", "modulename": "sqlglot.expressions", "qualname": "TableSample", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Tag": {"fullname": "sqlglot.expressions.Tag", "modulename": "sqlglot.expressions", "qualname": "Tag", "kind": "class", "doc": "

Tags are used for generating arbitrary SQL like SELECT x.

\n", "bases": "Expression"}, "sqlglot.expressions.Pivot": {"fullname": "sqlglot.expressions.Pivot", "modulename": "sqlglot.expressions", "qualname": "Pivot", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Window": {"fullname": "sqlglot.expressions.Window", "modulename": "sqlglot.expressions", "qualname": "Window", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.WindowSpec": {"fullname": "sqlglot.expressions.WindowSpec", "modulename": "sqlglot.expressions", "qualname": "WindowSpec", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Where": {"fullname": "sqlglot.expressions.Where", "modulename": "sqlglot.expressions", "qualname": "Where", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Star": {"fullname": "sqlglot.expressions.Star", "modulename": "sqlglot.expressions", "qualname": "Star", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Star.output_name": {"fullname": "sqlglot.expressions.Star.output_name", "modulename": "sqlglot.expressions", "qualname": "Star.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Parameter": {"fullname": "sqlglot.expressions.Parameter", "modulename": "sqlglot.expressions", "qualname": "Parameter", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SessionParameter": {"fullname": "sqlglot.expressions.SessionParameter", "modulename": "sqlglot.expressions", "qualname": "SessionParameter", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Placeholder": {"fullname": "sqlglot.expressions.Placeholder", "modulename": "sqlglot.expressions", "qualname": "Placeholder", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Null": {"fullname": "sqlglot.expressions.Null", "modulename": "sqlglot.expressions", "qualname": "Null", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Boolean": {"fullname": "sqlglot.expressions.Boolean", "modulename": "sqlglot.expressions", "qualname": "Boolean", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.DataTypeSize": {"fullname": "sqlglot.expressions.DataTypeSize", "modulename": "sqlglot.expressions", "qualname": "DataTypeSize", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.DataType": {"fullname": "sqlglot.expressions.DataType", "modulename": "sqlglot.expressions", "qualname": "DataType", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.DataType.Type": {"fullname": "sqlglot.expressions.DataType.Type", "modulename": "sqlglot.expressions", "qualname": "DataType.Type", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.expressions.DataType.Type.ARRAY": {"fullname": "sqlglot.expressions.DataType.Type.ARRAY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.ARRAY", "kind": "variable", "doc": "

\n", "default_value": "<Type.ARRAY: 'ARRAY'>"}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"fullname": "sqlglot.expressions.DataType.Type.BIGDECIMAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BIGDECIMAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.BIGDECIMAL: 'BIGDECIMAL'>"}, "sqlglot.expressions.DataType.Type.BIGINT": {"fullname": "sqlglot.expressions.DataType.Type.BIGINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BIGINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.BIGINT: 'BIGINT'>"}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"fullname": "sqlglot.expressions.DataType.Type.BIGSERIAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BIGSERIAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.BIGSERIAL: 'BIGSERIAL'>"}, "sqlglot.expressions.DataType.Type.BINARY": {"fullname": "sqlglot.expressions.DataType.Type.BINARY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BINARY", "kind": "variable", "doc": "

\n", "default_value": "<Type.BINARY: 'BINARY'>"}, "sqlglot.expressions.DataType.Type.BIT": {"fullname": "sqlglot.expressions.DataType.Type.BIT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BIT", "kind": "variable", "doc": "

\n", "default_value": "<Type.BIT: 'BIT'>"}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"fullname": "sqlglot.expressions.DataType.Type.BOOLEAN", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BOOLEAN", "kind": "variable", "doc": "

\n", "default_value": "<Type.BOOLEAN: 'BOOLEAN'>"}, "sqlglot.expressions.DataType.Type.CHAR": {"fullname": "sqlglot.expressions.DataType.Type.CHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.CHAR", "kind": "variable", "doc": "

\n", "default_value": "<Type.CHAR: 'CHAR'>"}, "sqlglot.expressions.DataType.Type.DATE": {"fullname": "sqlglot.expressions.DataType.Type.DATE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATE", "kind": "variable", "doc": "

\n", "default_value": "<Type.DATE: 'DATE'>"}, "sqlglot.expressions.DataType.Type.DATETIME": {"fullname": "sqlglot.expressions.DataType.Type.DATETIME", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATETIME", "kind": "variable", "doc": "

\n", "default_value": "<Type.DATETIME: 'DATETIME'>"}, "sqlglot.expressions.DataType.Type.DATETIME64": {"fullname": "sqlglot.expressions.DataType.Type.DATETIME64", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATETIME64", "kind": "variable", "doc": "

\n", "default_value": "<Type.DATETIME64: 'DATETIME64'>"}, "sqlglot.expressions.DataType.Type.ENUM": {"fullname": "sqlglot.expressions.DataType.Type.ENUM", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.ENUM", "kind": "variable", "doc": "

\n", "default_value": "<Type.ENUM: 'ENUM'>"}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"fullname": "sqlglot.expressions.DataType.Type.INT4RANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT4RANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT4RANGE: 'INT4RANGE'>"}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.INT4MULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT4MULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT4MULTIRANGE: 'INT4MULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"fullname": "sqlglot.expressions.DataType.Type.INT8RANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT8RANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT8RANGE: 'INT8RANGE'>"}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.INT8MULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT8MULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT8MULTIRANGE: 'INT8MULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"fullname": "sqlglot.expressions.DataType.Type.NUMRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NUMRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.NUMRANGE: 'NUMRANGE'>"}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.NUMMULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NUMMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.NUMMULTIRANGE: 'NUMMULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.TSRANGE": {"fullname": "sqlglot.expressions.DataType.Type.TSRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TSRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.TSRANGE: 'TSRANGE'>"}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.TSMULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TSMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.TSMULTIRANGE: 'TSMULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"fullname": "sqlglot.expressions.DataType.Type.TSTZRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TSTZRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.TSTZRANGE: 'TSTZRANGE'>"}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TSTZMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.DATERANGE": {"fullname": "sqlglot.expressions.DataType.Type.DATERANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATERANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.DATERANGE: 'DATERANGE'>"}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"fullname": "sqlglot.expressions.DataType.Type.DATEMULTIRANGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATEMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.DATEMULTIRANGE: 'DATEMULTIRANGE'>"}, "sqlglot.expressions.DataType.Type.DECIMAL": {"fullname": "sqlglot.expressions.DataType.Type.DECIMAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DECIMAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.DECIMAL: 'DECIMAL'>"}, "sqlglot.expressions.DataType.Type.DOUBLE": {"fullname": "sqlglot.expressions.DataType.Type.DOUBLE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DOUBLE", "kind": "variable", "doc": "

\n", "default_value": "<Type.DOUBLE: 'DOUBLE'>"}, "sqlglot.expressions.DataType.Type.FLOAT": {"fullname": "sqlglot.expressions.DataType.Type.FLOAT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.FLOAT", "kind": "variable", "doc": "

\n", "default_value": "<Type.FLOAT: 'FLOAT'>"}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"fullname": "sqlglot.expressions.DataType.Type.GEOGRAPHY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.GEOGRAPHY", "kind": "variable", "doc": "

\n", "default_value": "<Type.GEOGRAPHY: 'GEOGRAPHY'>"}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"fullname": "sqlglot.expressions.DataType.Type.GEOMETRY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.GEOMETRY", "kind": "variable", "doc": "

\n", "default_value": "<Type.GEOMETRY: 'GEOMETRY'>"}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"fullname": "sqlglot.expressions.DataType.Type.HLLSKETCH", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.HLLSKETCH", "kind": "variable", "doc": "

\n", "default_value": "<Type.HLLSKETCH: 'HLLSKETCH'>"}, "sqlglot.expressions.DataType.Type.HSTORE": {"fullname": "sqlglot.expressions.DataType.Type.HSTORE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.HSTORE", "kind": "variable", "doc": "

\n", "default_value": "<Type.HSTORE: 'HSTORE'>"}, "sqlglot.expressions.DataType.Type.IMAGE": {"fullname": "sqlglot.expressions.DataType.Type.IMAGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.IMAGE", "kind": "variable", "doc": "

\n", "default_value": "<Type.IMAGE: 'IMAGE'>"}, "sqlglot.expressions.DataType.Type.INET": {"fullname": "sqlglot.expressions.DataType.Type.INET", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INET", "kind": "variable", "doc": "

\n", "default_value": "<Type.INET: 'INET'>"}, "sqlglot.expressions.DataType.Type.INT": {"fullname": "sqlglot.expressions.DataType.Type.INT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT: 'INT'>"}, "sqlglot.expressions.DataType.Type.INT128": {"fullname": "sqlglot.expressions.DataType.Type.INT128", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT128", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT128: 'INT128'>"}, "sqlglot.expressions.DataType.Type.INT256": {"fullname": "sqlglot.expressions.DataType.Type.INT256", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT256", "kind": "variable", "doc": "

\n", "default_value": "<Type.INT256: 'INT256'>"}, "sqlglot.expressions.DataType.Type.INTERVAL": {"fullname": "sqlglot.expressions.DataType.Type.INTERVAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INTERVAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.INTERVAL: 'INTERVAL'>"}, "sqlglot.expressions.DataType.Type.JSON": {"fullname": "sqlglot.expressions.DataType.Type.JSON", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.JSON", "kind": "variable", "doc": "

\n", "default_value": "<Type.JSON: 'JSON'>"}, "sqlglot.expressions.DataType.Type.JSONB": {"fullname": "sqlglot.expressions.DataType.Type.JSONB", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.JSONB", "kind": "variable", "doc": "

\n", "default_value": "<Type.JSONB: 'JSONB'>"}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"fullname": "sqlglot.expressions.DataType.Type.LONGBLOB", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.LONGBLOB", "kind": "variable", "doc": "

\n", "default_value": "<Type.LONGBLOB: 'LONGBLOB'>"}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"fullname": "sqlglot.expressions.DataType.Type.LONGTEXT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.LONGTEXT", "kind": "variable", "doc": "

\n", "default_value": "<Type.LONGTEXT: 'LONGTEXT'>"}, "sqlglot.expressions.DataType.Type.MAP": {"fullname": "sqlglot.expressions.DataType.Type.MAP", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MAP", "kind": "variable", "doc": "

\n", "default_value": "<Type.MAP: 'MAP'>"}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"fullname": "sqlglot.expressions.DataType.Type.MEDIUMBLOB", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MEDIUMBLOB", "kind": "variable", "doc": "

\n", "default_value": "<Type.MEDIUMBLOB: 'MEDIUMBLOB'>"}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"fullname": "sqlglot.expressions.DataType.Type.MEDIUMTEXT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MEDIUMTEXT", "kind": "variable", "doc": "

\n", "default_value": "<Type.MEDIUMTEXT: 'MEDIUMTEXT'>"}, "sqlglot.expressions.DataType.Type.MONEY": {"fullname": "sqlglot.expressions.DataType.Type.MONEY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MONEY", "kind": "variable", "doc": "

\n", "default_value": "<Type.MONEY: 'MONEY'>"}, "sqlglot.expressions.DataType.Type.NCHAR": {"fullname": "sqlglot.expressions.DataType.Type.NCHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NCHAR", "kind": "variable", "doc": "

\n", "default_value": "<Type.NCHAR: 'NCHAR'>"}, "sqlglot.expressions.DataType.Type.NULL": {"fullname": "sqlglot.expressions.DataType.Type.NULL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NULL", "kind": "variable", "doc": "

\n", "default_value": "<Type.NULL: 'NULL'>"}, "sqlglot.expressions.DataType.Type.NULLABLE": {"fullname": "sqlglot.expressions.DataType.Type.NULLABLE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NULLABLE", "kind": "variable", "doc": "

\n", "default_value": "<Type.NULLABLE: 'NULLABLE'>"}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"fullname": "sqlglot.expressions.DataType.Type.NVARCHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NVARCHAR", "kind": "variable", "doc": "

\n", "default_value": "<Type.NVARCHAR: 'NVARCHAR'>"}, "sqlglot.expressions.DataType.Type.OBJECT": {"fullname": "sqlglot.expressions.DataType.Type.OBJECT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.OBJECT", "kind": "variable", "doc": "

\n", "default_value": "<Type.OBJECT: 'OBJECT'>"}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"fullname": "sqlglot.expressions.DataType.Type.ROWVERSION", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.ROWVERSION", "kind": "variable", "doc": "

\n", "default_value": "<Type.ROWVERSION: 'ROWVERSION'>"}, "sqlglot.expressions.DataType.Type.SERIAL": {"fullname": "sqlglot.expressions.DataType.Type.SERIAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SERIAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.SERIAL: 'SERIAL'>"}, "sqlglot.expressions.DataType.Type.SET": {"fullname": "sqlglot.expressions.DataType.Type.SET", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SET", "kind": "variable", "doc": "

\n", "default_value": "<Type.SET: 'SET'>"}, "sqlglot.expressions.DataType.Type.SMALLINT": {"fullname": "sqlglot.expressions.DataType.Type.SMALLINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SMALLINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.SMALLINT: 'SMALLINT'>"}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"fullname": "sqlglot.expressions.DataType.Type.SMALLMONEY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SMALLMONEY", "kind": "variable", "doc": "

\n", "default_value": "<Type.SMALLMONEY: 'SMALLMONEY'>"}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"fullname": "sqlglot.expressions.DataType.Type.SMALLSERIAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SMALLSERIAL", "kind": "variable", "doc": "

\n", "default_value": "<Type.SMALLSERIAL: 'SMALLSERIAL'>"}, "sqlglot.expressions.DataType.Type.STRUCT": {"fullname": "sqlglot.expressions.DataType.Type.STRUCT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.STRUCT", "kind": "variable", "doc": "

\n", "default_value": "<Type.STRUCT: 'STRUCT'>"}, "sqlglot.expressions.DataType.Type.SUPER": {"fullname": "sqlglot.expressions.DataType.Type.SUPER", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SUPER", "kind": "variable", "doc": "

\n", "default_value": "<Type.SUPER: 'SUPER'>"}, "sqlglot.expressions.DataType.Type.TEXT": {"fullname": "sqlglot.expressions.DataType.Type.TEXT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TEXT", "kind": "variable", "doc": "

\n", "default_value": "<Type.TEXT: 'TEXT'>"}, "sqlglot.expressions.DataType.Type.TIME": {"fullname": "sqlglot.expressions.DataType.Type.TIME", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIME", "kind": "variable", "doc": "

\n", "default_value": "<Type.TIME: 'TIME'>"}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"fullname": "sqlglot.expressions.DataType.Type.TIMESTAMP", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIMESTAMP", "kind": "variable", "doc": "

\n", "default_value": "<Type.TIMESTAMP: 'TIMESTAMP'>"}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"fullname": "sqlglot.expressions.DataType.Type.TIMESTAMPTZ", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIMESTAMPTZ", "kind": "variable", "doc": "

\n", "default_value": "<Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>"}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"fullname": "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIMESTAMPLTZ", "kind": "variable", "doc": "

\n", "default_value": "<Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>"}, "sqlglot.expressions.DataType.Type.TINYINT": {"fullname": "sqlglot.expressions.DataType.Type.TINYINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TINYINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.TINYINT: 'TINYINT'>"}, "sqlglot.expressions.DataType.Type.UBIGINT": {"fullname": "sqlglot.expressions.DataType.Type.UBIGINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UBIGINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.UBIGINT: 'UBIGINT'>"}, "sqlglot.expressions.DataType.Type.UINT": {"fullname": "sqlglot.expressions.DataType.Type.UINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.UINT: 'UINT'>"}, "sqlglot.expressions.DataType.Type.USMALLINT": {"fullname": "sqlglot.expressions.DataType.Type.USMALLINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.USMALLINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.USMALLINT: 'USMALLINT'>"}, "sqlglot.expressions.DataType.Type.UTINYINT": {"fullname": "sqlglot.expressions.DataType.Type.UTINYINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UTINYINT", "kind": "variable", "doc": "

\n", "default_value": "<Type.UTINYINT: 'UTINYINT'>"}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"fullname": "sqlglot.expressions.DataType.Type.UNKNOWN", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UNKNOWN", "kind": "variable", "doc": "

\n", "default_value": "<Type.UNKNOWN: 'UNKNOWN'>"}, "sqlglot.expressions.DataType.Type.UINT128": {"fullname": "sqlglot.expressions.DataType.Type.UINT128", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UINT128", "kind": "variable", "doc": "

\n", "default_value": "<Type.UINT128: 'UINT128'>"}, "sqlglot.expressions.DataType.Type.UINT256": {"fullname": "sqlglot.expressions.DataType.Type.UINT256", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UINT256", "kind": "variable", "doc": "

\n", "default_value": "<Type.UINT256: 'UINT256'>"}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"fullname": "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UNIQUEIDENTIFIER", "kind": "variable", "doc": "

\n", "default_value": "<Type.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>"}, "sqlglot.expressions.DataType.Type.UUID": {"fullname": "sqlglot.expressions.DataType.Type.UUID", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UUID", "kind": "variable", "doc": "

\n", "default_value": "<Type.UUID: 'UUID'>"}, "sqlglot.expressions.DataType.Type.VARBINARY": {"fullname": "sqlglot.expressions.DataType.Type.VARBINARY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.VARBINARY", "kind": "variable", "doc": "

\n", "default_value": "<Type.VARBINARY: 'VARBINARY'>"}, "sqlglot.expressions.DataType.Type.VARCHAR": {"fullname": "sqlglot.expressions.DataType.Type.VARCHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.VARCHAR", "kind": "variable", "doc": "

\n", "default_value": "<Type.VARCHAR: 'VARCHAR'>"}, "sqlglot.expressions.DataType.Type.VARIANT": {"fullname": "sqlglot.expressions.DataType.Type.VARIANT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.VARIANT", "kind": "variable", "doc": "

\n", "default_value": "<Type.VARIANT: 'VARIANT'>"}, "sqlglot.expressions.DataType.Type.XML": {"fullname": "sqlglot.expressions.DataType.Type.XML", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.XML", "kind": "variable", "doc": "

\n", "default_value": "<Type.XML: 'XML'>"}, "sqlglot.expressions.DataType.build": {"fullname": "sqlglot.expressions.DataType.build", "modulename": "sqlglot.expressions", "qualname": "DataType.build", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tdtype: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**kwargs) -> sqlglot.expressions.DataType:", "funcdef": "def"}, "sqlglot.expressions.DataType.is_type": {"fullname": "sqlglot.expressions.DataType.is_type", "modulename": "sqlglot.expressions", "qualname": "DataType.is_type", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*dtypes: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type) -> bool:", "funcdef": "def"}, "sqlglot.expressions.PseudoType": {"fullname": "sqlglot.expressions.PseudoType", "modulename": "sqlglot.expressions", "qualname": "PseudoType", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SubqueryPredicate": {"fullname": "sqlglot.expressions.SubqueryPredicate", "modulename": "sqlglot.expressions", "qualname": "SubqueryPredicate", "kind": "class", "doc": "

\n", "bases": "Predicate"}, "sqlglot.expressions.All": {"fullname": "sqlglot.expressions.All", "modulename": "sqlglot.expressions", "qualname": "All", "kind": "class", "doc": "

\n", "bases": "SubqueryPredicate"}, "sqlglot.expressions.Any": {"fullname": "sqlglot.expressions.Any", "modulename": "sqlglot.expressions", "qualname": "Any", "kind": "class", "doc": "

\n", "bases": "SubqueryPredicate"}, "sqlglot.expressions.Exists": {"fullname": "sqlglot.expressions.Exists", "modulename": "sqlglot.expressions", "qualname": "Exists", "kind": "class", "doc": "

\n", "bases": "SubqueryPredicate"}, "sqlglot.expressions.Command": {"fullname": "sqlglot.expressions.Command", "modulename": "sqlglot.expressions", "qualname": "Command", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Transaction": {"fullname": "sqlglot.expressions.Transaction", "modulename": "sqlglot.expressions", "qualname": "Transaction", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Commit": {"fullname": "sqlglot.expressions.Commit", "modulename": "sqlglot.expressions", "qualname": "Commit", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Rollback": {"fullname": "sqlglot.expressions.Rollback", "modulename": "sqlglot.expressions", "qualname": "Rollback", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AlterTable": {"fullname": "sqlglot.expressions.AlterTable", "modulename": "sqlglot.expressions", "qualname": "AlterTable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AddConstraint": {"fullname": "sqlglot.expressions.AddConstraint", "modulename": "sqlglot.expressions", "qualname": "AddConstraint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.DropPartition": {"fullname": "sqlglot.expressions.DropPartition", "modulename": "sqlglot.expressions", "qualname": "DropPartition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Binary": {"fullname": "sqlglot.expressions.Binary", "modulename": "sqlglot.expressions", "qualname": "Binary", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Add": {"fullname": "sqlglot.expressions.Add", "modulename": "sqlglot.expressions", "qualname": "Add", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Connector": {"fullname": "sqlglot.expressions.Connector", "modulename": "sqlglot.expressions", "qualname": "Connector", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.And": {"fullname": "sqlglot.expressions.And", "modulename": "sqlglot.expressions", "qualname": "And", "kind": "class", "doc": "

\n", "bases": "Connector"}, "sqlglot.expressions.Or": {"fullname": "sqlglot.expressions.Or", "modulename": "sqlglot.expressions", "qualname": "Or", "kind": "class", "doc": "

\n", "bases": "Connector"}, "sqlglot.expressions.BitwiseAnd": {"fullname": "sqlglot.expressions.BitwiseAnd", "modulename": "sqlglot.expressions", "qualname": "BitwiseAnd", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseLeftShift": {"fullname": "sqlglot.expressions.BitwiseLeftShift", "modulename": "sqlglot.expressions", "qualname": "BitwiseLeftShift", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseOr": {"fullname": "sqlglot.expressions.BitwiseOr", "modulename": "sqlglot.expressions", "qualname": "BitwiseOr", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseRightShift": {"fullname": "sqlglot.expressions.BitwiseRightShift", "modulename": "sqlglot.expressions", "qualname": "BitwiseRightShift", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseXor": {"fullname": "sqlglot.expressions.BitwiseXor", "modulename": "sqlglot.expressions", "qualname": "BitwiseXor", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Div": {"fullname": "sqlglot.expressions.Div", "modulename": "sqlglot.expressions", "qualname": "Div", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Overlaps": {"fullname": "sqlglot.expressions.Overlaps", "modulename": "sqlglot.expressions", "qualname": "Overlaps", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Dot": {"fullname": "sqlglot.expressions.Dot", "modulename": "sqlglot.expressions", "qualname": "Dot", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Dot.output_name": {"fullname": "sqlglot.expressions.Dot.output_name", "modulename": "sqlglot.expressions", "qualname": "Dot.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Dot.build": {"fullname": "sqlglot.expressions.Dot.build", "modulename": "sqlglot.expressions", "qualname": "Dot.build", "kind": "function", "doc": "

Build a Dot object with a sequence of expressions.

\n", "signature": "(\tself,\texpressions: Sequence[sqlglot.expressions.Expression]) -> sqlglot.expressions.Dot:", "funcdef": "def"}, "sqlglot.expressions.DPipe": {"fullname": "sqlglot.expressions.DPipe", "modulename": "sqlglot.expressions", "qualname": "DPipe", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.SafeDPipe": {"fullname": "sqlglot.expressions.SafeDPipe", "modulename": "sqlglot.expressions", "qualname": "SafeDPipe", "kind": "class", "doc": "

\n", "bases": "DPipe"}, "sqlglot.expressions.EQ": {"fullname": "sqlglot.expressions.EQ", "modulename": "sqlglot.expressions", "qualname": "EQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.NullSafeEQ": {"fullname": "sqlglot.expressions.NullSafeEQ", "modulename": "sqlglot.expressions", "qualname": "NullSafeEQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.NullSafeNEQ": {"fullname": "sqlglot.expressions.NullSafeNEQ", "modulename": "sqlglot.expressions", "qualname": "NullSafeNEQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Distance": {"fullname": "sqlglot.expressions.Distance", "modulename": "sqlglot.expressions", "qualname": "Distance", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Escape": {"fullname": "sqlglot.expressions.Escape", "modulename": "sqlglot.expressions", "qualname": "Escape", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Glob": {"fullname": "sqlglot.expressions.Glob", "modulename": "sqlglot.expressions", "qualname": "Glob", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.GT": {"fullname": "sqlglot.expressions.GT", "modulename": "sqlglot.expressions", "qualname": "GT", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.GTE": {"fullname": "sqlglot.expressions.GTE", "modulename": "sqlglot.expressions", "qualname": "GTE", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.ILike": {"fullname": "sqlglot.expressions.ILike", "modulename": "sqlglot.expressions", "qualname": "ILike", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.ILikeAny": {"fullname": "sqlglot.expressions.ILikeAny", "modulename": "sqlglot.expressions", "qualname": "ILikeAny", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.IntDiv": {"fullname": "sqlglot.expressions.IntDiv", "modulename": "sqlglot.expressions", "qualname": "IntDiv", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Is": {"fullname": "sqlglot.expressions.Is", "modulename": "sqlglot.expressions", "qualname": "Is", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Kwarg": {"fullname": "sqlglot.expressions.Kwarg", "modulename": "sqlglot.expressions", "qualname": "Kwarg", "kind": "class", "doc": "

Kwarg in special functions like func(kwarg => y).

\n", "bases": "Binary"}, "sqlglot.expressions.Like": {"fullname": "sqlglot.expressions.Like", "modulename": "sqlglot.expressions", "qualname": "Like", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.LikeAny": {"fullname": "sqlglot.expressions.LikeAny", "modulename": "sqlglot.expressions", "qualname": "LikeAny", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.LT": {"fullname": "sqlglot.expressions.LT", "modulename": "sqlglot.expressions", "qualname": "LT", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.LTE": {"fullname": "sqlglot.expressions.LTE", "modulename": "sqlglot.expressions", "qualname": "LTE", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Mod": {"fullname": "sqlglot.expressions.Mod", "modulename": "sqlglot.expressions", "qualname": "Mod", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Mul": {"fullname": "sqlglot.expressions.Mul", "modulename": "sqlglot.expressions", "qualname": "Mul", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.NEQ": {"fullname": "sqlglot.expressions.NEQ", "modulename": "sqlglot.expressions", "qualname": "NEQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.SimilarTo": {"fullname": "sqlglot.expressions.SimilarTo", "modulename": "sqlglot.expressions", "qualname": "SimilarTo", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Slice": {"fullname": "sqlglot.expressions.Slice", "modulename": "sqlglot.expressions", "qualname": "Slice", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Sub": {"fullname": "sqlglot.expressions.Sub", "modulename": "sqlglot.expressions", "qualname": "Sub", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.ArrayOverlaps": {"fullname": "sqlglot.expressions.ArrayOverlaps", "modulename": "sqlglot.expressions", "qualname": "ArrayOverlaps", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Unary": {"fullname": "sqlglot.expressions.Unary", "modulename": "sqlglot.expressions", "qualname": "Unary", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.BitwiseNot": {"fullname": "sqlglot.expressions.BitwiseNot", "modulename": "sqlglot.expressions", "qualname": "BitwiseNot", "kind": "class", "doc": "

\n", "bases": "Unary"}, "sqlglot.expressions.Not": {"fullname": "sqlglot.expressions.Not", "modulename": "sqlglot.expressions", "qualname": "Not", "kind": "class", "doc": "

\n", "bases": "Unary"}, "sqlglot.expressions.Paren": {"fullname": "sqlglot.expressions.Paren", "modulename": "sqlglot.expressions", "qualname": "Paren", "kind": "class", "doc": "

\n", "bases": "Unary"}, "sqlglot.expressions.Paren.output_name": {"fullname": "sqlglot.expressions.Paren.output_name", "modulename": "sqlglot.expressions", "qualname": "Paren.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Neg": {"fullname": "sqlglot.expressions.Neg", "modulename": "sqlglot.expressions", "qualname": "Neg", "kind": "class", "doc": "

\n", "bases": "Unary"}, "sqlglot.expressions.Alias": {"fullname": "sqlglot.expressions.Alias", "modulename": "sqlglot.expressions", "qualname": "Alias", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Alias.output_name": {"fullname": "sqlglot.expressions.Alias.output_name", "modulename": "sqlglot.expressions", "qualname": "Alias.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Aliases": {"fullname": "sqlglot.expressions.Aliases", "modulename": "sqlglot.expressions", "qualname": "Aliases", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AtTimeZone": {"fullname": "sqlglot.expressions.AtTimeZone", "modulename": "sqlglot.expressions", "qualname": "AtTimeZone", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Between": {"fullname": "sqlglot.expressions.Between", "modulename": "sqlglot.expressions", "qualname": "Between", "kind": "class", "doc": "

\n", "bases": "Predicate"}, "sqlglot.expressions.Bracket": {"fullname": "sqlglot.expressions.Bracket", "modulename": "sqlglot.expressions", "qualname": "Bracket", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Distinct": {"fullname": "sqlglot.expressions.Distinct", "modulename": "sqlglot.expressions", "qualname": "Distinct", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.In": {"fullname": "sqlglot.expressions.In", "modulename": "sqlglot.expressions", "qualname": "In", "kind": "class", "doc": "

\n", "bases": "Predicate"}, "sqlglot.expressions.TimeUnit": {"fullname": "sqlglot.expressions.TimeUnit", "modulename": "sqlglot.expressions", "qualname": "TimeUnit", "kind": "class", "doc": "

Automatically converts unit arg into a var.

\n", "bases": "Expression"}, "sqlglot.expressions.TimeUnit.__init__": {"fullname": "sqlglot.expressions.TimeUnit.__init__", "modulename": "sqlglot.expressions", "qualname": "TimeUnit.__init__", "kind": "function", "doc": "

\n", "signature": "(**args)"}, "sqlglot.expressions.Interval": {"fullname": "sqlglot.expressions.Interval", "modulename": "sqlglot.expressions", "qualname": "Interval", "kind": "class", "doc": "

\n", "bases": "TimeUnit"}, "sqlglot.expressions.IgnoreNulls": {"fullname": "sqlglot.expressions.IgnoreNulls", "modulename": "sqlglot.expressions", "qualname": "IgnoreNulls", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.RespectNulls": {"fullname": "sqlglot.expressions.RespectNulls", "modulename": "sqlglot.expressions", "qualname": "RespectNulls", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Func": {"fullname": "sqlglot.expressions.Func", "modulename": "sqlglot.expressions", "qualname": "Func", "kind": "class", "doc": "

The base class for all function expressions.

\n\n
Attributes:
\n\n
    \n
  • is_var_len_args (bool): if set to True the last argument defined in arg_types will be\ntreated as a variable length argument and the argument's value will be stored as a list.
  • \n
  • _sql_names (list): determines the SQL name (1st item in the list) and aliases (subsequent items)\nfor this function expression. These values are used to map this node to a name during parsing\nas well as to provide the function's name during SQL string generation. By default the SQL\nname is set to the expression's class name transformed to snake case.
  • \n
\n", "bases": "Condition"}, "sqlglot.expressions.Func.from_arg_list": {"fullname": "sqlglot.expressions.Func.from_arg_list", "modulename": "sqlglot.expressions", "qualname": "Func.from_arg_list", "kind": "function", "doc": "

\n", "signature": "(cls, args):", "funcdef": "def"}, "sqlglot.expressions.Func.sql_names": {"fullname": "sqlglot.expressions.Func.sql_names", "modulename": "sqlglot.expressions", "qualname": "Func.sql_names", "kind": "function", "doc": "

\n", "signature": "(cls):", "funcdef": "def"}, "sqlglot.expressions.Func.sql_name": {"fullname": "sqlglot.expressions.Func.sql_name", "modulename": "sqlglot.expressions", "qualname": "Func.sql_name", "kind": "function", "doc": "

\n", "signature": "(cls):", "funcdef": "def"}, "sqlglot.expressions.Func.default_parser_mappings": {"fullname": "sqlglot.expressions.Func.default_parser_mappings", "modulename": "sqlglot.expressions", "qualname": "Func.default_parser_mappings", "kind": "function", "doc": "

\n", "signature": "(cls):", "funcdef": "def"}, "sqlglot.expressions.AggFunc": {"fullname": "sqlglot.expressions.AggFunc", "modulename": "sqlglot.expressions", "qualname": "AggFunc", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ParameterizedAgg": {"fullname": "sqlglot.expressions.ParameterizedAgg", "modulename": "sqlglot.expressions", "qualname": "ParameterizedAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Abs": {"fullname": "sqlglot.expressions.Abs", "modulename": "sqlglot.expressions", "qualname": "Abs", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Anonymous": {"fullname": "sqlglot.expressions.Anonymous", "modulename": "sqlglot.expressions", "qualname": "Anonymous", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Hll": {"fullname": "sqlglot.expressions.Hll", "modulename": "sqlglot.expressions", "qualname": "Hll", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.ApproxDistinct": {"fullname": "sqlglot.expressions.ApproxDistinct", "modulename": "sqlglot.expressions", "qualname": "ApproxDistinct", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Array": {"fullname": "sqlglot.expressions.Array", "modulename": "sqlglot.expressions", "qualname": "Array", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ToChar": {"fullname": "sqlglot.expressions.ToChar", "modulename": "sqlglot.expressions", "qualname": "ToChar", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.GenerateSeries": {"fullname": "sqlglot.expressions.GenerateSeries", "modulename": "sqlglot.expressions", "qualname": "GenerateSeries", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayAgg": {"fullname": "sqlglot.expressions.ArrayAgg", "modulename": "sqlglot.expressions", "qualname": "ArrayAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.ArrayAll": {"fullname": "sqlglot.expressions.ArrayAll", "modulename": "sqlglot.expressions", "qualname": "ArrayAll", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayAny": {"fullname": "sqlglot.expressions.ArrayAny", "modulename": "sqlglot.expressions", "qualname": "ArrayAny", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayConcat": {"fullname": "sqlglot.expressions.ArrayConcat", "modulename": "sqlglot.expressions", "qualname": "ArrayConcat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayContains": {"fullname": "sqlglot.expressions.ArrayContains", "modulename": "sqlglot.expressions", "qualname": "ArrayContains", "kind": "class", "doc": "

\n", "bases": "Binary, Func"}, "sqlglot.expressions.ArrayContained": {"fullname": "sqlglot.expressions.ArrayContained", "modulename": "sqlglot.expressions", "qualname": "ArrayContained", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.ArrayFilter": {"fullname": "sqlglot.expressions.ArrayFilter", "modulename": "sqlglot.expressions", "qualname": "ArrayFilter", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayJoin": {"fullname": "sqlglot.expressions.ArrayJoin", "modulename": "sqlglot.expressions", "qualname": "ArrayJoin", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArraySize": {"fullname": "sqlglot.expressions.ArraySize", "modulename": "sqlglot.expressions", "qualname": "ArraySize", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArraySort": {"fullname": "sqlglot.expressions.ArraySort", "modulename": "sqlglot.expressions", "qualname": "ArraySort", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArraySum": {"fullname": "sqlglot.expressions.ArraySum", "modulename": "sqlglot.expressions", "qualname": "ArraySum", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayUnionAgg": {"fullname": "sqlglot.expressions.ArrayUnionAgg", "modulename": "sqlglot.expressions", "qualname": "ArrayUnionAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Avg": {"fullname": "sqlglot.expressions.Avg", "modulename": "sqlglot.expressions", "qualname": "Avg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.AnyValue": {"fullname": "sqlglot.expressions.AnyValue", "modulename": "sqlglot.expressions", "qualname": "AnyValue", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Case": {"fullname": "sqlglot.expressions.Case", "modulename": "sqlglot.expressions", "qualname": "Case", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Case.when": {"fullname": "sqlglot.expressions.Case.when", "modulename": "sqlglot.expressions", "qualname": "Case.when", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcondition: Union[str, sqlglot.expressions.Expression],\tthen: Union[str, sqlglot.expressions.Expression],\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Case:", "funcdef": "def"}, "sqlglot.expressions.Case.else_": {"fullname": "sqlglot.expressions.Case.else_", "modulename": "sqlglot.expressions", "qualname": "Case.else_", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcondition: Union[str, sqlglot.expressions.Expression],\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Case:", "funcdef": "def"}, "sqlglot.expressions.Cast": {"fullname": "sqlglot.expressions.Cast", "modulename": "sqlglot.expressions", "qualname": "Cast", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Cast.output_name": {"fullname": "sqlglot.expressions.Cast.output_name", "modulename": "sqlglot.expressions", "qualname": "Cast.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n", "annotation": ": str"}, "sqlglot.expressions.Cast.is_type": {"fullname": "sqlglot.expressions.Cast.is_type", "modulename": "sqlglot.expressions", "qualname": "Cast.is_type", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*dtypes: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type) -> bool:", "funcdef": "def"}, "sqlglot.expressions.CastToStrType": {"fullname": "sqlglot.expressions.CastToStrType", "modulename": "sqlglot.expressions", "qualname": "CastToStrType", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Collate": {"fullname": "sqlglot.expressions.Collate", "modulename": "sqlglot.expressions", "qualname": "Collate", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.TryCast": {"fullname": "sqlglot.expressions.TryCast", "modulename": "sqlglot.expressions", "qualname": "TryCast", "kind": "class", "doc": "

\n", "bases": "Cast"}, "sqlglot.expressions.Ceil": {"fullname": "sqlglot.expressions.Ceil", "modulename": "sqlglot.expressions", "qualname": "Ceil", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Coalesce": {"fullname": "sqlglot.expressions.Coalesce", "modulename": "sqlglot.expressions", "qualname": "Coalesce", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Concat": {"fullname": "sqlglot.expressions.Concat", "modulename": "sqlglot.expressions", "qualname": "Concat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SafeConcat": {"fullname": "sqlglot.expressions.SafeConcat", "modulename": "sqlglot.expressions", "qualname": "SafeConcat", "kind": "class", "doc": "

\n", "bases": "Concat"}, "sqlglot.expressions.ConcatWs": {"fullname": "sqlglot.expressions.ConcatWs", "modulename": "sqlglot.expressions", "qualname": "ConcatWs", "kind": "class", "doc": "

\n", "bases": "Concat"}, "sqlglot.expressions.Count": {"fullname": "sqlglot.expressions.Count", "modulename": "sqlglot.expressions", "qualname": "Count", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.CountIf": {"fullname": "sqlglot.expressions.CountIf", "modulename": "sqlglot.expressions", "qualname": "CountIf", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.CurrentDate": {"fullname": "sqlglot.expressions.CurrentDate", "modulename": "sqlglot.expressions", "qualname": "CurrentDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentDatetime": {"fullname": "sqlglot.expressions.CurrentDatetime", "modulename": "sqlglot.expressions", "qualname": "CurrentDatetime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentTime": {"fullname": "sqlglot.expressions.CurrentTime", "modulename": "sqlglot.expressions", "qualname": "CurrentTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentTimestamp": {"fullname": "sqlglot.expressions.CurrentTimestamp", "modulename": "sqlglot.expressions", "qualname": "CurrentTimestamp", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentUser": {"fullname": "sqlglot.expressions.CurrentUser", "modulename": "sqlglot.expressions", "qualname": "CurrentUser", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateAdd": {"fullname": "sqlglot.expressions.DateAdd", "modulename": "sqlglot.expressions", "qualname": "DateAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateSub": {"fullname": "sqlglot.expressions.DateSub", "modulename": "sqlglot.expressions", "qualname": "DateSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateDiff": {"fullname": "sqlglot.expressions.DateDiff", "modulename": "sqlglot.expressions", "qualname": "DateDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateTrunc": {"fullname": "sqlglot.expressions.DateTrunc", "modulename": "sqlglot.expressions", "qualname": "DateTrunc", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DatetimeAdd": {"fullname": "sqlglot.expressions.DatetimeAdd", "modulename": "sqlglot.expressions", "qualname": "DatetimeAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DatetimeSub": {"fullname": "sqlglot.expressions.DatetimeSub", "modulename": "sqlglot.expressions", "qualname": "DatetimeSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DatetimeDiff": {"fullname": "sqlglot.expressions.DatetimeDiff", "modulename": "sqlglot.expressions", "qualname": "DatetimeDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DatetimeTrunc": {"fullname": "sqlglot.expressions.DatetimeTrunc", "modulename": "sqlglot.expressions", "qualname": "DatetimeTrunc", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DayOfWeek": {"fullname": "sqlglot.expressions.DayOfWeek", "modulename": "sqlglot.expressions", "qualname": "DayOfWeek", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DayOfMonth": {"fullname": "sqlglot.expressions.DayOfMonth", "modulename": "sqlglot.expressions", "qualname": "DayOfMonth", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DayOfYear": {"fullname": "sqlglot.expressions.DayOfYear", "modulename": "sqlglot.expressions", "qualname": "DayOfYear", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.WeekOfYear": {"fullname": "sqlglot.expressions.WeekOfYear", "modulename": "sqlglot.expressions", "qualname": "WeekOfYear", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.LastDateOfMonth": {"fullname": "sqlglot.expressions.LastDateOfMonth", "modulename": "sqlglot.expressions", "qualname": "LastDateOfMonth", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Extract": {"fullname": "sqlglot.expressions.Extract", "modulename": "sqlglot.expressions", "qualname": "Extract", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimestampAdd": {"fullname": "sqlglot.expressions.TimestampAdd", "modulename": "sqlglot.expressions", "qualname": "TimestampAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimestampSub": {"fullname": "sqlglot.expressions.TimestampSub", "modulename": "sqlglot.expressions", "qualname": "TimestampSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimestampDiff": {"fullname": "sqlglot.expressions.TimestampDiff", "modulename": "sqlglot.expressions", "qualname": "TimestampDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimestampTrunc": {"fullname": "sqlglot.expressions.TimestampTrunc", "modulename": "sqlglot.expressions", "qualname": "TimestampTrunc", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeAdd": {"fullname": "sqlglot.expressions.TimeAdd", "modulename": "sqlglot.expressions", "qualname": "TimeAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeSub": {"fullname": "sqlglot.expressions.TimeSub", "modulename": "sqlglot.expressions", "qualname": "TimeSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeDiff": {"fullname": "sqlglot.expressions.TimeDiff", "modulename": "sqlglot.expressions", "qualname": "TimeDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeTrunc": {"fullname": "sqlglot.expressions.TimeTrunc", "modulename": "sqlglot.expressions", "qualname": "TimeTrunc", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateFromParts": {"fullname": "sqlglot.expressions.DateFromParts", "modulename": "sqlglot.expressions", "qualname": "DateFromParts", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateStrToDate": {"fullname": "sqlglot.expressions.DateStrToDate", "modulename": "sqlglot.expressions", "qualname": "DateStrToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateToDateStr": {"fullname": "sqlglot.expressions.DateToDateStr", "modulename": "sqlglot.expressions", "qualname": "DateToDateStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateToDi": {"fullname": "sqlglot.expressions.DateToDi", "modulename": "sqlglot.expressions", "qualname": "DateToDi", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Date": {"fullname": "sqlglot.expressions.Date", "modulename": "sqlglot.expressions", "qualname": "Date", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Day": {"fullname": "sqlglot.expressions.Day", "modulename": "sqlglot.expressions", "qualname": "Day", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Decode": {"fullname": "sqlglot.expressions.Decode", "modulename": "sqlglot.expressions", "qualname": "Decode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DiToDate": {"fullname": "sqlglot.expressions.DiToDate", "modulename": "sqlglot.expressions", "qualname": "DiToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Encode": {"fullname": "sqlglot.expressions.Encode", "modulename": "sqlglot.expressions", "qualname": "Encode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Exp": {"fullname": "sqlglot.expressions.Exp", "modulename": "sqlglot.expressions", "qualname": "Exp", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Explode": {"fullname": "sqlglot.expressions.Explode", "modulename": "sqlglot.expressions", "qualname": "Explode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Floor": {"fullname": "sqlglot.expressions.Floor", "modulename": "sqlglot.expressions", "qualname": "Floor", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.FromBase64": {"fullname": "sqlglot.expressions.FromBase64", "modulename": "sqlglot.expressions", "qualname": "FromBase64", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ToBase64": {"fullname": "sqlglot.expressions.ToBase64", "modulename": "sqlglot.expressions", "qualname": "ToBase64", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Greatest": {"fullname": "sqlglot.expressions.Greatest", "modulename": "sqlglot.expressions", "qualname": "Greatest", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.GroupConcat": {"fullname": "sqlglot.expressions.GroupConcat", "modulename": "sqlglot.expressions", "qualname": "GroupConcat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Hex": {"fullname": "sqlglot.expressions.Hex", "modulename": "sqlglot.expressions", "qualname": "Hex", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.If": {"fullname": "sqlglot.expressions.If", "modulename": "sqlglot.expressions", "qualname": "If", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Initcap": {"fullname": "sqlglot.expressions.Initcap", "modulename": "sqlglot.expressions", "qualname": "Initcap", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.JSONKeyValue": {"fullname": "sqlglot.expressions.JSONKeyValue", "modulename": "sqlglot.expressions", "qualname": "JSONKeyValue", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.JSONObject": {"fullname": "sqlglot.expressions.JSONObject", "modulename": "sqlglot.expressions", "qualname": "JSONObject", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.OpenJSONColumnDef": {"fullname": "sqlglot.expressions.OpenJSONColumnDef", "modulename": "sqlglot.expressions", "qualname": "OpenJSONColumnDef", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.OpenJSON": {"fullname": "sqlglot.expressions.OpenJSON", "modulename": "sqlglot.expressions", "qualname": "OpenJSON", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.JSONBContains": {"fullname": "sqlglot.expressions.JSONBContains", "modulename": "sqlglot.expressions", "qualname": "JSONBContains", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.JSONExtract": {"fullname": "sqlglot.expressions.JSONExtract", "modulename": "sqlglot.expressions", "qualname": "JSONExtract", "kind": "class", "doc": "

\n", "bases": "Binary, Func"}, "sqlglot.expressions.JSONExtractScalar": {"fullname": "sqlglot.expressions.JSONExtractScalar", "modulename": "sqlglot.expressions", "qualname": "JSONExtractScalar", "kind": "class", "doc": "

\n", "bases": "JSONExtract"}, "sqlglot.expressions.JSONBExtract": {"fullname": "sqlglot.expressions.JSONBExtract", "modulename": "sqlglot.expressions", "qualname": "JSONBExtract", "kind": "class", "doc": "

\n", "bases": "JSONExtract"}, "sqlglot.expressions.JSONBExtractScalar": {"fullname": "sqlglot.expressions.JSONBExtractScalar", "modulename": "sqlglot.expressions", "qualname": "JSONBExtractScalar", "kind": "class", "doc": "

\n", "bases": "JSONExtract"}, "sqlglot.expressions.JSONFormat": {"fullname": "sqlglot.expressions.JSONFormat", "modulename": "sqlglot.expressions", "qualname": "JSONFormat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Least": {"fullname": "sqlglot.expressions.Least", "modulename": "sqlglot.expressions", "qualname": "Least", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Left": {"fullname": "sqlglot.expressions.Left", "modulename": "sqlglot.expressions", "qualname": "Left", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Right": {"fullname": "sqlglot.expressions.Right", "modulename": "sqlglot.expressions", "qualname": "Right", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Length": {"fullname": "sqlglot.expressions.Length", "modulename": "sqlglot.expressions", "qualname": "Length", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Levenshtein": {"fullname": "sqlglot.expressions.Levenshtein", "modulename": "sqlglot.expressions", "qualname": "Levenshtein", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Ln": {"fullname": "sqlglot.expressions.Ln", "modulename": "sqlglot.expressions", "qualname": "Ln", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Log": {"fullname": "sqlglot.expressions.Log", "modulename": "sqlglot.expressions", "qualname": "Log", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Log2": {"fullname": "sqlglot.expressions.Log2", "modulename": "sqlglot.expressions", "qualname": "Log2", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Log10": {"fullname": "sqlglot.expressions.Log10", "modulename": "sqlglot.expressions", "qualname": "Log10", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.LogicalOr": {"fullname": "sqlglot.expressions.LogicalOr", "modulename": "sqlglot.expressions", "qualname": "LogicalOr", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.LogicalAnd": {"fullname": "sqlglot.expressions.LogicalAnd", "modulename": "sqlglot.expressions", "qualname": "LogicalAnd", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Lower": {"fullname": "sqlglot.expressions.Lower", "modulename": "sqlglot.expressions", "qualname": "Lower", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Map": {"fullname": "sqlglot.expressions.Map", "modulename": "sqlglot.expressions", "qualname": "Map", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StarMap": {"fullname": "sqlglot.expressions.StarMap", "modulename": "sqlglot.expressions", "qualname": "StarMap", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.VarMap": {"fullname": "sqlglot.expressions.VarMap", "modulename": "sqlglot.expressions", "qualname": "VarMap", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.MatchAgainst": {"fullname": "sqlglot.expressions.MatchAgainst", "modulename": "sqlglot.expressions", "qualname": "MatchAgainst", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Max": {"fullname": "sqlglot.expressions.Max", "modulename": "sqlglot.expressions", "qualname": "Max", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.MD5": {"fullname": "sqlglot.expressions.MD5", "modulename": "sqlglot.expressions", "qualname": "MD5", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Min": {"fullname": "sqlglot.expressions.Min", "modulename": "sqlglot.expressions", "qualname": "Min", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Month": {"fullname": "sqlglot.expressions.Month", "modulename": "sqlglot.expressions", "qualname": "Month", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Nvl2": {"fullname": "sqlglot.expressions.Nvl2", "modulename": "sqlglot.expressions", "qualname": "Nvl2", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Posexplode": {"fullname": "sqlglot.expressions.Posexplode", "modulename": "sqlglot.expressions", "qualname": "Posexplode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Pow": {"fullname": "sqlglot.expressions.Pow", "modulename": "sqlglot.expressions", "qualname": "Pow", "kind": "class", "doc": "

\n", "bases": "Binary, Func"}, "sqlglot.expressions.PercentileCont": {"fullname": "sqlglot.expressions.PercentileCont", "modulename": "sqlglot.expressions", "qualname": "PercentileCont", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.PercentileDisc": {"fullname": "sqlglot.expressions.PercentileDisc", "modulename": "sqlglot.expressions", "qualname": "PercentileDisc", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Quantile": {"fullname": "sqlglot.expressions.Quantile", "modulename": "sqlglot.expressions", "qualname": "Quantile", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.ApproxQuantile": {"fullname": "sqlglot.expressions.ApproxQuantile", "modulename": "sqlglot.expressions", "qualname": "ApproxQuantile", "kind": "class", "doc": "

\n", "bases": "Quantile"}, "sqlglot.expressions.RangeN": {"fullname": "sqlglot.expressions.RangeN", "modulename": "sqlglot.expressions", "qualname": "RangeN", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ReadCSV": {"fullname": "sqlglot.expressions.ReadCSV", "modulename": "sqlglot.expressions", "qualname": "ReadCSV", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Reduce": {"fullname": "sqlglot.expressions.Reduce", "modulename": "sqlglot.expressions", "qualname": "Reduce", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpExtract": {"fullname": "sqlglot.expressions.RegexpExtract", "modulename": "sqlglot.expressions", "qualname": "RegexpExtract", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpLike": {"fullname": "sqlglot.expressions.RegexpLike", "modulename": "sqlglot.expressions", "qualname": "RegexpLike", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpILike": {"fullname": "sqlglot.expressions.RegexpILike", "modulename": "sqlglot.expressions", "qualname": "RegexpILike", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpSplit": {"fullname": "sqlglot.expressions.RegexpSplit", "modulename": "sqlglot.expressions", "qualname": "RegexpSplit", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Repeat": {"fullname": "sqlglot.expressions.Repeat", "modulename": "sqlglot.expressions", "qualname": "Repeat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Round": {"fullname": "sqlglot.expressions.Round", "modulename": "sqlglot.expressions", "qualname": "Round", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RowNumber": {"fullname": "sqlglot.expressions.RowNumber", "modulename": "sqlglot.expressions", "qualname": "RowNumber", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SafeDivide": {"fullname": "sqlglot.expressions.SafeDivide", "modulename": "sqlglot.expressions", "qualname": "SafeDivide", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SetAgg": {"fullname": "sqlglot.expressions.SetAgg", "modulename": "sqlglot.expressions", "qualname": "SetAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.SHA": {"fullname": "sqlglot.expressions.SHA", "modulename": "sqlglot.expressions", "qualname": "SHA", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SHA2": {"fullname": "sqlglot.expressions.SHA2", "modulename": "sqlglot.expressions", "qualname": "SHA2", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SortArray": {"fullname": "sqlglot.expressions.SortArray", "modulename": "sqlglot.expressions", "qualname": "SortArray", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Split": {"fullname": "sqlglot.expressions.Split", "modulename": "sqlglot.expressions", "qualname": "Split", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Substring": {"fullname": "sqlglot.expressions.Substring", "modulename": "sqlglot.expressions", "qualname": "Substring", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StandardHash": {"fullname": "sqlglot.expressions.StandardHash", "modulename": "sqlglot.expressions", "qualname": "StandardHash", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrPosition": {"fullname": "sqlglot.expressions.StrPosition", "modulename": "sqlglot.expressions", "qualname": "StrPosition", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrToDate": {"fullname": "sqlglot.expressions.StrToDate", "modulename": "sqlglot.expressions", "qualname": "StrToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrToTime": {"fullname": "sqlglot.expressions.StrToTime", "modulename": "sqlglot.expressions", "qualname": "StrToTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrToUnix": {"fullname": "sqlglot.expressions.StrToUnix", "modulename": "sqlglot.expressions", "qualname": "StrToUnix", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.NumberToStr": {"fullname": "sqlglot.expressions.NumberToStr", "modulename": "sqlglot.expressions", "qualname": "NumberToStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.FromBase": {"fullname": "sqlglot.expressions.FromBase", "modulename": "sqlglot.expressions", "qualname": "FromBase", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Struct": {"fullname": "sqlglot.expressions.Struct", "modulename": "sqlglot.expressions", "qualname": "Struct", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StructExtract": {"fullname": "sqlglot.expressions.StructExtract", "modulename": "sqlglot.expressions", "qualname": "StructExtract", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Sum": {"fullname": "sqlglot.expressions.Sum", "modulename": "sqlglot.expressions", "qualname": "Sum", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Sqrt": {"fullname": "sqlglot.expressions.Sqrt", "modulename": "sqlglot.expressions", "qualname": "Sqrt", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Stddev": {"fullname": "sqlglot.expressions.Stddev", "modulename": "sqlglot.expressions", "qualname": "Stddev", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.StddevPop": {"fullname": "sqlglot.expressions.StddevPop", "modulename": "sqlglot.expressions", "qualname": "StddevPop", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.StddevSamp": {"fullname": "sqlglot.expressions.StddevSamp", "modulename": "sqlglot.expressions", "qualname": "StddevSamp", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.TimeToStr": {"fullname": "sqlglot.expressions.TimeToStr", "modulename": "sqlglot.expressions", "qualname": "TimeToStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeToTimeStr": {"fullname": "sqlglot.expressions.TimeToTimeStr", "modulename": "sqlglot.expressions", "qualname": "TimeToTimeStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeToUnix": {"fullname": "sqlglot.expressions.TimeToUnix", "modulename": "sqlglot.expressions", "qualname": "TimeToUnix", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeStrToDate": {"fullname": "sqlglot.expressions.TimeStrToDate", "modulename": "sqlglot.expressions", "qualname": "TimeStrToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeStrToTime": {"fullname": "sqlglot.expressions.TimeStrToTime", "modulename": "sqlglot.expressions", "qualname": "TimeStrToTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeStrToUnix": {"fullname": "sqlglot.expressions.TimeStrToUnix", "modulename": "sqlglot.expressions", "qualname": "TimeStrToUnix", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Trim": {"fullname": "sqlglot.expressions.Trim", "modulename": "sqlglot.expressions", "qualname": "Trim", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TsOrDsAdd": {"fullname": "sqlglot.expressions.TsOrDsAdd", "modulename": "sqlglot.expressions", "qualname": "TsOrDsAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TsOrDsToDateStr": {"fullname": "sqlglot.expressions.TsOrDsToDateStr", "modulename": "sqlglot.expressions", "qualname": "TsOrDsToDateStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TsOrDsToDate": {"fullname": "sqlglot.expressions.TsOrDsToDate", "modulename": "sqlglot.expressions", "qualname": "TsOrDsToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TsOrDiToDi": {"fullname": "sqlglot.expressions.TsOrDiToDi", "modulename": "sqlglot.expressions", "qualname": "TsOrDiToDi", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Unhex": {"fullname": "sqlglot.expressions.Unhex", "modulename": "sqlglot.expressions", "qualname": "Unhex", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.UnixToStr": {"fullname": "sqlglot.expressions.UnixToStr", "modulename": "sqlglot.expressions", "qualname": "UnixToStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.UnixToTime": {"fullname": "sqlglot.expressions.UnixToTime", "modulename": "sqlglot.expressions", "qualname": "UnixToTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.UnixToTimeStr": {"fullname": "sqlglot.expressions.UnixToTimeStr", "modulename": "sqlglot.expressions", "qualname": "UnixToTimeStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Upper": {"fullname": "sqlglot.expressions.Upper", "modulename": "sqlglot.expressions", "qualname": "Upper", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Variance": {"fullname": "sqlglot.expressions.Variance", "modulename": "sqlglot.expressions", "qualname": "Variance", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.VariancePop": {"fullname": "sqlglot.expressions.VariancePop", "modulename": "sqlglot.expressions", "qualname": "VariancePop", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Week": {"fullname": "sqlglot.expressions.Week", "modulename": "sqlglot.expressions", "qualname": "Week", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.XMLTable": {"fullname": "sqlglot.expressions.XMLTable", "modulename": "sqlglot.expressions", "qualname": "XMLTable", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Year": {"fullname": "sqlglot.expressions.Year", "modulename": "sqlglot.expressions", "qualname": "Year", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Use": {"fullname": "sqlglot.expressions.Use", "modulename": "sqlglot.expressions", "qualname": "Use", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Merge": {"fullname": "sqlglot.expressions.Merge", "modulename": "sqlglot.expressions", "qualname": "Merge", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.When": {"fullname": "sqlglot.expressions.When", "modulename": "sqlglot.expressions", "qualname": "When", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.NextValueFor": {"fullname": "sqlglot.expressions.NextValueFor", "modulename": "sqlglot.expressions", "qualname": "NextValueFor", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.maybe_parse": {"fullname": "sqlglot.expressions.maybe_parse", "modulename": "sqlglot.expressions", "qualname": "maybe_parse", "kind": "function", "doc": "

Gracefully handle a possible string or expression.

\n\n
Example:
\n\n
\n
\n
>>> maybe_parse("1")\n(LITERAL this: 1, is_string: False)\n>>> maybe_parse(to_identifier("x"))\n(IDENTIFIER this: x, quoted: False)\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • sql_or_expression: the SQL code string or an expression
  • \n
  • into: the SQLGlot Expression to parse into
  • \n
  • dialect: the dialect used to parse the input expressions (in the case that an\ninput expression is a SQL string).
  • \n
  • prefix: a string to prefix the sql with before it gets parsed\n(automatically includes a space)
  • \n
  • copy: whether or not to copy the expression.
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat an input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

Expression: the parsed or given expression.

\n
\n", "signature": "(\tsql_or_expression: Union[str, sqlglot.expressions.Expression],\t*,\tinto: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]], NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tprefix: Optional[str] = None,\tcopy: bool = False,\t**opts) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.union": {"fullname": "sqlglot.expressions.union", "modulename": "sqlglot.expressions", "qualname": "union", "kind": "function", "doc": "

Initializes a syntax tree from one UNION expression.

\n\n
Example:
\n\n
\n
\n
>>> union("SELECT * FROM foo", "SELECT * FROM bla").sql()\n'SELECT * FROM foo UNION SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • left: the SQL code string corresponding to the left-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • right: the SQL code string corresponding to the right-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Union instance.

\n
\n", "signature": "(\tleft: Union[str, sqlglot.expressions.Expression],\tright: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Union:", "funcdef": "def"}, "sqlglot.expressions.intersect": {"fullname": "sqlglot.expressions.intersect", "modulename": "sqlglot.expressions", "qualname": "intersect", "kind": "function", "doc": "

Initializes a syntax tree from one INTERSECT expression.

\n\n
Example:
\n\n
\n
\n
>>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql()\n'SELECT * FROM foo INTERSECT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • left: the SQL code string corresponding to the left-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • right: the SQL code string corresponding to the right-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Intersect instance.

\n
\n", "signature": "(\tleft: Union[str, sqlglot.expressions.Expression],\tright: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Intersect:", "funcdef": "def"}, "sqlglot.expressions.except_": {"fullname": "sqlglot.expressions.except_", "modulename": "sqlglot.expressions", "qualname": "except_", "kind": "function", "doc": "

Initializes a syntax tree from one EXCEPT expression.

\n\n
Example:
\n\n
\n
\n
>>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql()\n'SELECT * FROM foo EXCEPT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • left: the SQL code string corresponding to the left-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • right: the SQL code string corresponding to the right-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct: set the DISTINCT flag if and only if this is true.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new Except instance.

\n
\n", "signature": "(\tleft: Union[str, sqlglot.expressions.Expression],\tright: Union[str, sqlglot.expressions.Expression],\tdistinct: bool = True,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Except:", "funcdef": "def"}, "sqlglot.expressions.select": {"fullname": "sqlglot.expressions.select", "modulename": "sqlglot.expressions", "qualname": "select", "kind": "function", "doc": "

Initializes a syntax tree from one or multiple SELECT expressions.

\n\n
Example:
\n\n
\n
\n
>>> select("col1", "col2").from_("tbl").sql()\n'SELECT col1, col2 FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse as the expressions of a\nSELECT statement. If an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expressions (in the case that an\ninput expression is a SQL string).
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat an input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

Select: the syntax tree for the SELECT statement.

\n
\n", "signature": "(\t*expressions: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.from_": {"fullname": "sqlglot.expressions.from_", "modulename": "sqlglot.expressions", "qualname": "from_", "kind": "function", "doc": "

Initializes a syntax tree from a FROM expression.

\n\n
Example:
\n\n
\n
\n
>>> from_("tbl").select("col1", "col2").sql()\n'SELECT col1, col2 FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse as the FROM expression of a\nSELECT statement. If an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expression (in the case that the\ninput expression is a SQL string).
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat the input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

Select: the syntax tree for the SELECT statement.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.update": {"fullname": "sqlglot.expressions.update", "modulename": "sqlglot.expressions", "qualname": "update", "kind": "function", "doc": "

Creates an update statement.

\n\n
Example:
\n\n
\n
\n
>>> update("my_table", {"x": 1, "y": "2", "z": None}, from_="baz", where="id > 1").sql()\n"UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • properties: a dictionary of properties to set, which are\nauto-converted to SQL objects, e.g. None -> NULL
  • \n
  • where: sql conditional parsed into a WHERE statement
  • \n
  • from_: sql statement parsed into a FROM statement
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Update: the syntax tree for the UPDATE statement.

\n
\n", "signature": "(\ttable: str | sqlglot.expressions.Table,\tproperties: dict,\twhere: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tfrom_: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Update:", "funcdef": "def"}, "sqlglot.expressions.delete": {"fullname": "sqlglot.expressions.delete", "modulename": "sqlglot.expressions", "qualname": "delete", "kind": "function", "doc": "

Builds a delete statement.

\n\n
Example:
\n\n
\n
\n
>>> delete("my_table", where="id > 1").sql()\n'DELETE FROM my_table WHERE id > 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • where: sql conditional parsed into a WHERE statement
  • \n
  • returning: sql conditional parsed into a RETURNING statement
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Delete: the syntax tree for the DELETE statement.

\n
\n", "signature": "(\ttable: Union[str, sqlglot.expressions.Expression],\twhere: Union[str, sqlglot.expressions.Expression, NoneType] = None,\treturning: Union[str, sqlglot.expressions.Expression, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Delete:", "funcdef": "def"}, "sqlglot.expressions.insert": {"fullname": "sqlglot.expressions.insert", "modulename": "sqlglot.expressions", "qualname": "insert", "kind": "function", "doc": "

Builds an INSERT statement.

\n\n
Example:
\n\n
\n
\n
>>> insert("VALUES (1, 2, 3)", "tbl").sql()\n'INSERT INTO tbl VALUES (1, 2, 3)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the sql string or expression of the INSERT statement
  • \n
  • into: the table to insert data into.
  • \n
  • columns: optionally the table's column names.
  • \n
  • overwrite: whether to INSERT OVERWRITE or not.
  • \n
  • dialect: the dialect used to parse the input expressions.
  • \n
  • copy: whether or not to copy the expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Insert: the syntax tree for the INSERT statement.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tinto: Union[str, sqlglot.expressions.Expression],\tcolumns: Optional[Sequence[Union[str, sqlglot.expressions.Expression]]] = None,\toverwrite: Optional[bool] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Insert:", "funcdef": "def"}, "sqlglot.expressions.condition": {"fullname": "sqlglot.expressions.condition", "modulename": "sqlglot.expressions", "qualname": "condition", "kind": "function", "doc": "

Initialize a logical condition expression.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").sql()\n'x = 1'\n
\n
\n \n

This is helpful for composing larger logical syntax trees:

\n \n
\n
>>> where = condition("x=1")\n>>> where = where.and_("y=1")\n>>> Select().from_("tbl").select("*").where(where).sql()\n'SELECT * FROM tbl WHERE x = 1 AND y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expression (in the case that the\ninput expression is a SQL string).
  • \n
  • copy: Whether or not to copy expression (only applies to expressions).
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat the input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

The new Condition instance

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.and_": {"fullname": "sqlglot.expressions.and_", "modulename": "sqlglot.expressions", "qualname": "and_", "kind": "function", "doc": "

Combine multiple conditions with an AND logical operator.

\n\n
Example:
\n\n
\n
\n
>>> and_("x=1", and_("y=1", "z=1")).sql()\n'x = 1 AND (y = 1 AND z = 1)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: whether or not to copy expressions (only applies to Expressions).
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

And: the new condition

\n
\n", "signature": "(\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.or_": {"fullname": "sqlglot.expressions.or_", "modulename": "sqlglot.expressions", "qualname": "or_", "kind": "function", "doc": "

Combine multiple conditions with an OR logical operator.

\n\n
Example:
\n\n
\n
\n
>>> or_("x=1", or_("y=1", "z=1")).sql()\n'x = 1 OR (y = 1 OR z = 1)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions: the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: whether or not to copy expressions (only applies to Expressions).
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Or: the new condition

\n
\n", "signature": "(\t*expressions: Union[str, sqlglot.expressions.Expression, NoneType],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.not_": {"fullname": "sqlglot.expressions.not_", "modulename": "sqlglot.expressions", "qualname": "not_", "kind": "function", "doc": "

Wrap a condition with a NOT operator.

\n\n
Example:
\n\n
\n
\n
>>> not_("this_suit='black'").sql()\n"NOT this_suit = 'black'"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: whether to copy the expression or not.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

The new condition.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts) -> sqlglot.expressions.Not:", "funcdef": "def"}, "sqlglot.expressions.paren": {"fullname": "sqlglot.expressions.paren", "modulename": "sqlglot.expressions", "qualname": "paren", "kind": "function", "doc": "

Wrap an expression in parentheses.

\n\n
Example:
\n\n
\n
\n
>>> paren("5 + 3").sql()\n'(5 + 3)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • copy: whether to copy the expression or not.
  • \n
\n\n
Returns:
\n\n
\n

The wrapped expression.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tcopy: bool = True) -> sqlglot.expressions.Paren:", "funcdef": "def"}, "sqlglot.expressions.to_identifier": {"fullname": "sqlglot.expressions.to_identifier", "modulename": "sqlglot.expressions", "qualname": "to_identifier", "kind": "function", "doc": "

Builds an identifier.

\n\n
Arguments:
\n\n
    \n
  • name: The name to turn into an identifier.
  • \n
  • quoted: Whether or not to force quote the identifier.
  • \n
  • copy: Whether or not to copy a passed-in Identifier node.
  • \n
\n\n
Returns:
\n\n
\n

The identifier ast node.

\n
\n", "signature": "(name, quoted=None, copy=True):", "funcdef": "def"}, "sqlglot.expressions.to_interval": {"fullname": "sqlglot.expressions.to_interval", "modulename": "sqlglot.expressions", "qualname": "to_interval", "kind": "function", "doc": "

Builds an interval expression from a string like '1 day' or '5 months'.

\n", "signature": "(\tinterval: str | sqlglot.expressions.Literal) -> sqlglot.expressions.Interval:", "funcdef": "def"}, "sqlglot.expressions.to_table": {"fullname": "sqlglot.expressions.to_table", "modulename": "sqlglot.expressions", "qualname": "to_table", "kind": "function", "doc": "

Create a table expression from a [catalog].[schema].[table] sql path. Catalog and schema are optional.\nIf a table is passed in then that table is returned.

\n\n
Arguments:
\n\n
    \n
  • sql_path: a [catalog].[schema].[table] string.
  • \n
  • dialect: the source dialect according to which the table name will be parsed.
  • \n
  • kwargs: the kwargs to instantiate the resulting Table expression with.
  • \n
\n\n
Returns:
\n\n
\n

A table expression.

\n
\n", "signature": "(\tsql_path: Union[str, sqlglot.expressions.Table, NoneType],\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**kwargs) -> Optional[sqlglot.expressions.Table]:", "funcdef": "def"}, "sqlglot.expressions.to_column": {"fullname": "sqlglot.expressions.to_column", "modulename": "sqlglot.expressions", "qualname": "to_column", "kind": "function", "doc": "

Create a column from a [table].[column] sql path. Schema is optional.

\n\n

If a column is passed in then that column is returned.

\n\n
Arguments:
\n\n
    \n
  • sql_path: [table].[column] string
  • \n
\n\n
Returns:
\n\n
\n

Column: A column expression

\n
\n", "signature": "(\tsql_path: str | sqlglot.expressions.Column,\t**kwargs) -> sqlglot.expressions.Column:", "funcdef": "def"}, "sqlglot.expressions.alias_": {"fullname": "sqlglot.expressions.alias_", "modulename": "sqlglot.expressions", "qualname": "alias_", "kind": "function", "doc": "

Create an Alias expression.

\n\n
Example:
\n\n
\n
\n
>>> alias_('foo', 'bar').sql()\n'foo AS bar'\n
\n
\n \n
\n
>>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql()\n'(SELECT 1, 2) AS bar(a, b)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • alias: the alias name to use. If the name has\nspecial characters it is quoted.
  • \n
  • table: Whether or not to create a table alias; this can also be a list of columns.
  • \n
  • quoted: whether or not to quote the alias
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • copy: Whether or not to copy the expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Alias: the aliased expression

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\talias: str | sqlglot.expressions.Identifier,\ttable: Union[bool, Sequence[str | sqlglot.expressions.Identifier]] = False,\tquoted: Optional[bool] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tcopy: bool = True,\t**opts):", "funcdef": "def"}, "sqlglot.expressions.subquery": {"fullname": "sqlglot.expressions.subquery", "modulename": "sqlglot.expressions", "qualname": "subquery", "kind": "function", "doc": "

Build a subquery expression.

\n\n
Example:
\n\n
\n
\n
>>> subquery('select x from tbl', 'bar').select('x').sql()\n'SELECT x FROM (SELECT x FROM tbl) AS bar'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code string to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • alias: the alias name to use.
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

A new Select instance with the subquery expression included.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\talias: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.column": {"fullname": "sqlglot.expressions.column", "modulename": "sqlglot.expressions", "qualname": "column", "kind": "function", "doc": "

Build a Column.

\n\n
Arguments:
\n\n
    \n
  • col: Column name.
  • \n
  • table: Table name.
  • \n
  • db: Database name.
  • \n
  • catalog: Catalog name.
  • \n
  • quoted: Whether to force quotes on the column's identifiers.
  • \n
\n\n
Returns:
\n\n
\n

The new Column instance.

\n
\n", "signature": "(\tcol: str | sqlglot.expressions.Identifier,\ttable: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tdb: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tcatalog: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tquoted: Optional[bool] = None) -> sqlglot.expressions.Column:", "funcdef": "def"}, "sqlglot.expressions.cast": {"fullname": "sqlglot.expressions.cast", "modulename": "sqlglot.expressions", "qualname": "cast", "kind": "function", "doc": "

Cast an expression to a data type.

\n\n
Example:
\n\n
\n
\n
>>> cast('x + 1', 'int').sql()\n'CAST(x + 1 AS INT)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: The expression to cast.
  • \n
  • to: The datatype to cast to.
  • \n
\n\n
Returns:
\n\n
\n

The new Cast instance.

\n
\n", "signature": "(\texpression: Union[str, sqlglot.expressions.Expression],\tto: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type,\t**opts) -> sqlglot.expressions.Cast:", "funcdef": "def"}, "sqlglot.expressions.table_": {"fullname": "sqlglot.expressions.table_", "modulename": "sqlglot.expressions", "qualname": "table_", "kind": "function", "doc": "

Build a Table.

\n\n
Arguments:
\n\n
    \n
  • table: Table name.
  • \n
  • db: Database name.
  • \n
  • catalog: Catalog name.
  • \n
  • quote: Whether to force quotes on the table's identifiers.
  • \n
  • alias: Table's alias.
  • \n
\n\n
Returns:
\n\n
\n

The new Table instance.

\n
\n", "signature": "(\ttable: sqlglot.expressions.Identifier | str,\tdb: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tcatalog: Union[sqlglot.expressions.Identifier, str, NoneType] = None,\tquoted: Optional[bool] = None,\talias: Union[sqlglot.expressions.Identifier, str, NoneType] = None) -> sqlglot.expressions.Table:", "funcdef": "def"}, "sqlglot.expressions.values": {"fullname": "sqlglot.expressions.values", "modulename": "sqlglot.expressions", "qualname": "values", "kind": "function", "doc": "

Build VALUES statement.

\n\n
Example:
\n\n
\n
\n
>>> values([(1, '2')]).sql()\n"VALUES (1, '2')"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • values: values statements that will be converted to SQL
  • \n
  • alias: optional alias
  • \n
  • columns: Optional list of ordered column names or ordered dictionary of column names to types.\nIf either is provided, then an alias is also required.
  • \n
\n\n
Returns:
\n\n
\n

Values: the Values expression object

\n
\n", "signature": "(\tvalues: Iterable[Tuple[Any, ...]],\talias: Optional[str] = None,\tcolumns: Union[Iterable[str], Dict[str, sqlglot.expressions.DataType], NoneType] = None) -> sqlglot.expressions.Values:", "funcdef": "def"}, "sqlglot.expressions.var": {"fullname": "sqlglot.expressions.var", "modulename": "sqlglot.expressions", "qualname": "var", "kind": "function", "doc": "

Build a SQL variable.

\n\n
Example:
\n\n
\n
\n
>>> repr(var('x'))\n'(VAR this: x)'\n
\n
\n \n
\n
>>> repr(var(column('x', table='y')))\n'(VAR this: x)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • name: The name of the var or an expression whose name will become the var.
  • \n
\n\n
Returns:
\n\n
\n

The new variable node.

\n
\n", "signature": "(\tname: Union[str, sqlglot.expressions.Expression, NoneType]) -> sqlglot.expressions.Var:", "funcdef": "def"}, "sqlglot.expressions.rename_table": {"fullname": "sqlglot.expressions.rename_table", "modulename": "sqlglot.expressions", "qualname": "rename_table", "kind": "function", "doc": "

Build an ALTER TABLE ... RENAME ... expression.

\n\n
Arguments:
\n\n
    \n
  • old_name: The old name of the table
  • \n
  • new_name: The new name of the table
  • \n
\n\n
Returns:
\n\n
\n

Alter table expression

\n
\n", "signature": "(\told_name: str | sqlglot.expressions.Table,\tnew_name: str | sqlglot.expressions.Table) -> sqlglot.expressions.AlterTable:", "funcdef": "def"}, "sqlglot.expressions.convert": {"fullname": "sqlglot.expressions.convert", "modulename": "sqlglot.expressions", "qualname": "convert", "kind": "function", "doc": "

Convert a python value into an expression object.

\n\n

Raises an error if a conversion is not possible.

\n\n
Arguments:
\n\n
    \n
  • value: A python object.
  • \n
  • copy: Whether or not to copy value (only applies to Expressions and collections).
  • \n
\n\n
Returns:
\n\n
\n

Expression: the equivalent expression object.

\n
\n", "signature": "(value: Any, copy: bool = False) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.replace_children": {"fullname": "sqlglot.expressions.replace_children", "modulename": "sqlglot.expressions", "qualname": "replace_children", "kind": "function", "doc": "

Replace children of an expression with the result of a lambda fun(child) -> exp.

\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tfun: Callable,\t*args,\t**kwargs) -> None:", "funcdef": "def"}, "sqlglot.expressions.column_table_names": {"fullname": "sqlglot.expressions.column_table_names", "modulename": "sqlglot.expressions", "qualname": "column_table_names", "kind": "function", "doc": "

Return all table names referenced through columns in an expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e"))\n['c', 'a']\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: expression to find table names.
  • \n
\n\n
Returns:
\n\n
\n

A list of unique names.

\n
\n", "signature": "(expression: sqlglot.expressions.Expression) -> List[str]:", "funcdef": "def"}, "sqlglot.expressions.table_name": {"fullname": "sqlglot.expressions.table_name", "modulename": "sqlglot.expressions", "qualname": "table_name", "kind": "function", "doc": "

Get the full name of a table as a string.

\n\n
Arguments:
\n\n
    \n
  • table: table expression node or string.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import exp, parse_one\n>>> table_name(parse_one("select * from a.b.c").find(exp.Table))\n'a.b.c'\n
\n
\n
\n\n
Returns:
\n\n
\n

The table name.

\n
\n", "signature": "(table: sqlglot.expressions.Table | str) -> str:", "funcdef": "def"}, "sqlglot.expressions.replace_tables": {"fullname": "sqlglot.expressions.replace_tables", "modulename": "sqlglot.expressions", "qualname": "replace_tables", "kind": "function", "doc": "

Replace all tables in expression according to the mapping.

\n\n
Arguments:
\n\n
    \n
  • expression: expression node to be transformed and replaced.
  • \n
  • mapping: mapping of table names.
  • \n
  • copy: whether or not to copy the expression.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import exp, parse_one\n>>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql()\n'SELECT * FROM c'\n
\n
\n
\n\n
Returns:
\n\n
\n

The mapped expression.

\n
\n", "signature": "(expression: ~E, mapping: Dict[str, str], copy: bool = True) -> ~E:", "funcdef": "def"}, "sqlglot.expressions.replace_placeholders": {"fullname": "sqlglot.expressions.replace_placeholders", "modulename": "sqlglot.expressions", "qualname": "replace_placeholders", "kind": "function", "doc": "

Replace placeholders in an expression.

\n\n
Arguments:
\n\n
    \n
  • expression: expression node to be transformed and replaced.
  • \n
  • args: positional names that will substitute unnamed placeholders in the given order.
  • \n
  • kwargs: keyword arguments that will substitute named placeholders.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import exp, parse_one\n>>> replace_placeholders(\n...     parse_one("select * from :tbl where ? = ?"),\n...     exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo")\n... ).sql()\n"SELECT * FROM foo WHERE str_col = 'b'"\n
\n
\n
\n\n
Returns:
\n\n
\n

The mapped expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\t*args,\t**kwargs) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.expand": {"fullname": "sqlglot.expressions.expand", "modulename": "sqlglot.expressions", "qualname": "expand", "kind": "function", "doc": "

Transforms an expression by expanding all referenced sources into subqueries.

\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()\n'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'\n
\n
\n \n
\n
>>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql()\n'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: The expression to expand.
  • \n
  • sources: A dictionary of name to Subqueryables.
  • \n
  • copy: Whether or not to copy the expression during transformation. Defaults to True.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tsources: Dict[str, sqlglot.expressions.Subqueryable],\tcopy: bool = True) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.func": {"fullname": "sqlglot.expressions.func", "modulename": "sqlglot.expressions", "qualname": "func", "kind": "function", "doc": "

Returns a Func expression.

\n\n
Examples:
\n\n
\n
\n
>>> func("abs", 5).sql()\n'ABS(5)'\n
\n
\n \n
\n
>>> func("cast", this=5, to=DataType.build("DOUBLE")).sql()\n'CAST(5 AS DOUBLE)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • name: the name of the function to build.
  • \n
  • args: the args used to instantiate the function of interest.
  • \n
  • dialect: the source dialect.
  • \n
  • kwargs: the kwargs used to instantiate the function of interest.
  • \n
\n\n
Note:
\n\n
\n

The arguments args and kwargs are mutually exclusive.

\n
\n\n
Returns:
\n\n
\n

An instance of the function of interest, or an anonymous function, if name doesn't\n correspond to an existing sqlglot.expressions.Func class.

\n
\n", "signature": "(\tname: str,\t*args,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**kwargs) -> sqlglot.expressions.Func:", "funcdef": "def"}, "sqlglot.expressions.true": {"fullname": "sqlglot.expressions.true", "modulename": "sqlglot.expressions", "qualname": "true", "kind": "function", "doc": "

Returns a true Boolean expression.

\n", "signature": "() -> sqlglot.expressions.Boolean:", "funcdef": "def"}, "sqlglot.expressions.false": {"fullname": "sqlglot.expressions.false", "modulename": "sqlglot.expressions", "qualname": "false", "kind": "function", "doc": "

Returns a false Boolean expression.

\n", "signature": "() -> sqlglot.expressions.Boolean:", "funcdef": "def"}, "sqlglot.expressions.null": {"fullname": "sqlglot.expressions.null", "modulename": "sqlglot.expressions", "qualname": "null", "kind": "function", "doc": "

Returns a Null expression.

\n", "signature": "() -> sqlglot.expressions.Null:", "funcdef": "def"}, "sqlglot.generator": {"fullname": "sqlglot.generator", "modulename": "sqlglot.generator", "kind": "module", "doc": "

\n"}, "sqlglot.generator.Generator": {"fullname": "sqlglot.generator.Generator", "modulename": "sqlglot.generator", "qualname": "Generator", "kind": "class", "doc": "

Generator converts a given syntax tree to the corresponding SQL string.

\n\n
Arguments:
\n\n
    \n
  • pretty: Whether or not to format the produced SQL string.\nDefault: False.
  • \n
  • identify: Determines when an identifier should be quoted. Possible values are:\nFalse (default): Never quote, except in cases where it's mandatory by the dialect.\nTrue or 'always': Always quote.\n'safe': Only quote identifiers that are case insensitive.
  • \n
  • normalize: Whether or not to normalize identifiers to lowercase.\nDefault: False.
  • \n
  • pad: Determines the pad size in a formatted string.\nDefault: 2.
  • \n
  • indent: Determines the indentation size in a formatted string.\nDefault: 2.
  • \n
  • normalize_functions: Whether or not to normalize all function names. Possible values are:\n\"upper\" or True (default): Convert names to uppercase.\n\"lower\": Convert names to lowercase.\nFalse: Disables function name normalization.
  • \n
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.\nDefault ErrorLevel.WARN.
  • \n
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.\nThis is only relevant when generating in pretty mode.\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n"}, "sqlglot.generator.Generator.__init__": {"fullname": "sqlglot.generator.Generator.__init__", "modulename": "sqlglot.generator", "qualname": "Generator.__init__", "kind": "function", "doc": "

\n", "signature": "(\tpretty: Optional[bool] = None,\tidentify: str | bool = False,\tnormalize: bool = False,\tpad: int = 2,\tindent: int = 2,\tnormalize_functions: Union[str, bool, NoneType] = None,\tunsupported_level: sqlglot.errors.ErrorLevel = <ErrorLevel.WARN: 'WARN'>,\tmax_unsupported: int = 3,\tleading_comma: bool = False,\tmax_text_width: int = 80,\tcomments: bool = True)"}, "sqlglot.generator.Generator.generate": {"fullname": "sqlglot.generator.Generator.generate", "modulename": "sqlglot.generator", "qualname": "Generator.generate", "kind": "function", "doc": "

Generates the SQL string corresponding to the given syntax tree.

\n\n
Arguments:
\n\n
    \n
  • expression: The syntax tree.
  • \n
  • cache: An optional sql string cache. This leverages the hash of an Expression\nwhich can be slow to compute, so only use it if you set _hash on each node.
  • \n
\n\n
Returns:
\n\n
\n

The SQL string corresponding to expression.

\n
\n", "signature": "(\tself,\texpression: Optional[sqlglot.expressions.Expression],\tcache: Optional[Dict[int, str]] = None) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.unsupported": {"fullname": "sqlglot.generator.Generator.unsupported", "modulename": "sqlglot.generator", "qualname": "Generator.unsupported", "kind": "function", "doc": "

\n", "signature": "(self, message: str) -> None:", "funcdef": "def"}, "sqlglot.generator.Generator.sep": {"fullname": "sqlglot.generator.Generator.sep", "modulename": "sqlglot.generator", "qualname": "Generator.sep", "kind": "function", "doc": "

\n", "signature": "(self, sep: str = ' ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.seg": {"fullname": "sqlglot.generator.Generator.seg", "modulename": "sqlglot.generator", "qualname": "Generator.seg", "kind": "function", "doc": "

\n", "signature": "(self, sql: str, sep: str = ' ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pad_comment": {"fullname": "sqlglot.generator.Generator.pad_comment", "modulename": "sqlglot.generator", "qualname": "Generator.pad_comment", "kind": "function", "doc": "

\n", "signature": "(self, comment: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.maybe_comment": {"fullname": "sqlglot.generator.Generator.maybe_comment", "modulename": "sqlglot.generator", "qualname": "Generator.maybe_comment", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsql: str,\texpression: Optional[sqlglot.expressions.Expression] = None,\tcomments: Optional[List[str]] = None) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.wrap": {"fullname": "sqlglot.generator.Generator.wrap", "modulename": "sqlglot.generator", "qualname": "Generator.wrap", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression | str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.no_identify": {"fullname": "sqlglot.generator.Generator.no_identify", "modulename": "sqlglot.generator", "qualname": "Generator.no_identify", "kind": "function", "doc": "

\n", "signature": "(self, func: Callable[..., str], *args, **kwargs) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.normalize_func": {"fullname": "sqlglot.generator.Generator.normalize_func", "modulename": "sqlglot.generator", "qualname": "Generator.normalize_func", "kind": "function", "doc": "

\n", "signature": "(self, name: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.indent": {"fullname": "sqlglot.generator.Generator.indent", "modulename": "sqlglot.generator", "qualname": "Generator.indent", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsql: str,\tlevel: int = 0,\tpad: Optional[int] = None,\tskip_first: bool = False,\tskip_last: bool = False) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sql": {"fullname": "sqlglot.generator.Generator.sql", "modulename": "sqlglot.generator", "qualname": "Generator.sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, NoneType],\tkey: Optional[str] = None,\tcomment: bool = True) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.uncache_sql": {"fullname": "sqlglot.generator.Generator.uncache_sql", "modulename": "sqlglot.generator", "qualname": "Generator.uncache_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Uncache) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cache_sql": {"fullname": "sqlglot.generator.Generator.cache_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cache_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cache) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.characterset_sql": {"fullname": "sqlglot.generator.Generator.characterset_sql", "modulename": "sqlglot.generator", "qualname": "Generator.characterset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CharacterSet) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.column_sql": {"fullname": "sqlglot.generator.Generator.column_sql", "modulename": "sqlglot.generator", "qualname": "Generator.column_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Column) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.columnposition_sql": {"fullname": "sqlglot.generator.Generator.columnposition_sql", "modulename": "sqlglot.generator", "qualname": "Generator.columnposition_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ColumnPosition) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.columndef_sql": {"fullname": "sqlglot.generator.Generator.columndef_sql", "modulename": "sqlglot.generator", "qualname": "Generator.columndef_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ColumnDef, sep: str = ' ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.columnconstraint_sql": {"fullname": "sqlglot.generator.Generator.columnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.columnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.autoincrementcolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, _) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.compresscolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.compresscolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CompressColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.generatedasidentitycolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.GeneratedAsIdentityColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.notnullcolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.notnullcolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NotNullColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.primarykeycolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.primarykeycolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.PrimaryKeyColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.uniquecolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.uniquecolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.UniqueColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.createable_sql": {"fullname": "sqlglot.generator.Generator.createable_sql", "modulename": "sqlglot.generator", "qualname": "Generator.createable_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Create,\tlocations: dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.create_sql": {"fullname": "sqlglot.generator.Generator.create_sql", "modulename": "sqlglot.generator", "qualname": "Generator.create_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Create) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.clone_sql": {"fullname": "sqlglot.generator.Generator.clone_sql", "modulename": "sqlglot.generator", "qualname": "Generator.clone_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Clone) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.describe_sql": {"fullname": "sqlglot.generator.Generator.describe_sql", "modulename": "sqlglot.generator", "qualname": "Generator.describe_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Describe) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.prepend_ctes": {"fullname": "sqlglot.generator.Generator.prepend_ctes", "modulename": "sqlglot.generator", "qualname": "Generator.prepend_ctes", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression, sql: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.with_sql": {"fullname": "sqlglot.generator.Generator.with_sql", "modulename": "sqlglot.generator", "qualname": "Generator.with_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.With) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cte_sql": {"fullname": "sqlglot.generator.Generator.cte_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CTE) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tablealias_sql": {"fullname": "sqlglot.generator.Generator.tablealias_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tablealias_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.TableAlias) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitstring_sql": {"fullname": "sqlglot.generator.Generator.bitstring_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitstring_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitString) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.hexstring_sql": {"fullname": "sqlglot.generator.Generator.hexstring_sql", "modulename": "sqlglot.generator", "qualname": "Generator.hexstring_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.HexString) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bytestring_sql": {"fullname": "sqlglot.generator.Generator.bytestring_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bytestring_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ByteString) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.rawstring_sql": {"fullname": "sqlglot.generator.Generator.rawstring_sql", "modulename": "sqlglot.generator", "qualname": "Generator.rawstring_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RawString) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.datatypesize_sql": {"fullname": "sqlglot.generator.Generator.datatypesize_sql", "modulename": "sqlglot.generator", "qualname": "Generator.datatypesize_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataTypeSize) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.datatype_sql": {"fullname": "sqlglot.generator.Generator.datatype_sql", "modulename": "sqlglot.generator", "qualname": "Generator.datatype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataType) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.directory_sql": {"fullname": "sqlglot.generator.Generator.directory_sql", "modulename": "sqlglot.generator", "qualname": "Generator.directory_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Directory) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.delete_sql": {"fullname": "sqlglot.generator.Generator.delete_sql", "modulename": "sqlglot.generator", "qualname": "Generator.delete_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Delete) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.drop_sql": {"fullname": "sqlglot.generator.Generator.drop_sql", "modulename": "sqlglot.generator", "qualname": "Generator.drop_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Drop) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.except_sql": {"fullname": "sqlglot.generator.Generator.except_sql", "modulename": "sqlglot.generator", "qualname": "Generator.except_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.except_op": {"fullname": "sqlglot.generator.Generator.except_op", "modulename": "sqlglot.generator", "qualname": "Generator.except_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.fetch_sql": {"fullname": "sqlglot.generator.Generator.fetch_sql", "modulename": "sqlglot.generator", "qualname": "Generator.fetch_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Fetch) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.filter_sql": {"fullname": "sqlglot.generator.Generator.filter_sql", "modulename": "sqlglot.generator", "qualname": "Generator.filter_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Filter) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.hint_sql": {"fullname": "sqlglot.generator.Generator.hint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.hint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Hint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.index_sql": {"fullname": "sqlglot.generator.Generator.index_sql", "modulename": "sqlglot.generator", "qualname": "Generator.index_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Index) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.identifier_sql": {"fullname": "sqlglot.generator.Generator.identifier_sql", "modulename": "sqlglot.generator", "qualname": "Generator.identifier_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Identifier) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.inputoutputformat_sql": {"fullname": "sqlglot.generator.Generator.inputoutputformat_sql", "modulename": "sqlglot.generator", "qualname": "Generator.inputoutputformat_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.InputOutputFormat) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.national_sql": {"fullname": "sqlglot.generator.Generator.national_sql", "modulename": "sqlglot.generator", "qualname": "Generator.national_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.National, prefix: str = 'N') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.partition_sql": {"fullname": "sqlglot.generator.Generator.partition_sql", "modulename": "sqlglot.generator", "qualname": "Generator.partition_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Partition) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.properties_sql": {"fullname": "sqlglot.generator.Generator.properties_sql", "modulename": "sqlglot.generator", "qualname": "Generator.properties_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.root_properties": {"fullname": "sqlglot.generator.Generator.root_properties", "modulename": "sqlglot.generator", "qualname": "Generator.root_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.properties": {"fullname": "sqlglot.generator.Generator.properties", "modulename": "sqlglot.generator", "qualname": "Generator.properties", "kind": "function", "doc": "

\n", "signature": "(\tself,\tproperties: sqlglot.expressions.Properties,\tprefix: str = '',\tsep: str = ', ',\tsuffix: str = '',\twrapped: bool = True) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.with_properties": {"fullname": "sqlglot.generator.Generator.with_properties", "modulename": "sqlglot.generator", "qualname": "Generator.with_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.locate_properties": {"fullname": "sqlglot.generator.Generator.locate_properties", "modulename": "sqlglot.generator", "qualname": "Generator.locate_properties", "kind": "function", "doc": "

\n", "signature": "(\tself,\tproperties: sqlglot.expressions.Properties) -> Dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]:", "funcdef": "def"}, "sqlglot.generator.Generator.property_sql": {"fullname": "sqlglot.generator.Generator.property_sql", "modulename": "sqlglot.generator", "qualname": "Generator.property_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Property) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.likeproperty_sql": {"fullname": "sqlglot.generator.Generator.likeproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.likeproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LikeProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.fallbackproperty_sql": {"fullname": "sqlglot.generator.Generator.fallbackproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.fallbackproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.FallbackProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.journalproperty_sql": {"fullname": "sqlglot.generator.Generator.journalproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.journalproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.JournalProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.freespaceproperty_sql": {"fullname": "sqlglot.generator.Generator.freespaceproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.freespaceproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.FreespaceProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.checksumproperty_sql": {"fullname": "sqlglot.generator.Generator.checksumproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.checksumproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ChecksumProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"fullname": "sqlglot.generator.Generator.mergeblockratioproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mergeblockratioproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MergeBlockRatioProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"fullname": "sqlglot.generator.Generator.datablocksizeproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.datablocksizeproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataBlocksizeProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"fullname": "sqlglot.generator.Generator.blockcompressionproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.blockcompressionproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BlockCompressionProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"fullname": "sqlglot.generator.Generator.isolatedloadingproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.isolatedloadingproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.IsolatedLoadingProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lockingproperty_sql": {"fullname": "sqlglot.generator.Generator.lockingproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lockingproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LockingProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.withdataproperty_sql": {"fullname": "sqlglot.generator.Generator.withdataproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.withdataproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.WithDataProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.insert_sql": {"fullname": "sqlglot.generator.Generator.insert_sql", "modulename": "sqlglot.generator", "qualname": "Generator.insert_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Insert) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.intersect_sql": {"fullname": "sqlglot.generator.Generator.intersect_sql", "modulename": "sqlglot.generator", "qualname": "Generator.intersect_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.intersect_op": {"fullname": "sqlglot.generator.Generator.intersect_op", "modulename": "sqlglot.generator", "qualname": "Generator.intersect_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.introducer_sql": {"fullname": "sqlglot.generator.Generator.introducer_sql", "modulename": "sqlglot.generator", "qualname": "Generator.introducer_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Introducer) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pseudotype_sql": {"fullname": "sqlglot.generator.Generator.pseudotype_sql", "modulename": "sqlglot.generator", "qualname": "Generator.pseudotype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.PseudoType) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.onconflict_sql": {"fullname": "sqlglot.generator.Generator.onconflict_sql", "modulename": "sqlglot.generator", "qualname": "Generator.onconflict_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.OnConflict) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.returning_sql": {"fullname": "sqlglot.generator.Generator.returning_sql", "modulename": "sqlglot.generator", "qualname": "Generator.returning_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Returning) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"fullname": "sqlglot.generator.Generator.rowformatdelimitedproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.rowformatdelimitedproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RowFormatDelimitedProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.table_sql": {"fullname": "sqlglot.generator.Generator.table_sql", "modulename": "sqlglot.generator", "qualname": "Generator.table_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Table, sep: str = ' AS ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tablesample_sql": {"fullname": "sqlglot.generator.Generator.tablesample_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tablesample_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.TableSample,\tseed_prefix: str = 'SEED',\tsep=' AS ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pivot_sql": {"fullname": "sqlglot.generator.Generator.pivot_sql", "modulename": "sqlglot.generator", "qualname": "Generator.pivot_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Pivot) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tuple_sql": {"fullname": "sqlglot.generator.Generator.tuple_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tuple_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Tuple) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.update_sql": {"fullname": "sqlglot.generator.Generator.update_sql", "modulename": "sqlglot.generator", "qualname": "Generator.update_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Update) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.values_sql": {"fullname": "sqlglot.generator.Generator.values_sql", "modulename": "sqlglot.generator", "qualname": "Generator.values_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Values) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.var_sql": {"fullname": "sqlglot.generator.Generator.var_sql", "modulename": "sqlglot.generator", "qualname": "Generator.var_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Var) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.into_sql": {"fullname": "sqlglot.generator.Generator.into_sql", "modulename": "sqlglot.generator", "qualname": "Generator.into_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Into) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.from_sql": {"fullname": "sqlglot.generator.Generator.from_sql", "modulename": "sqlglot.generator", "qualname": "Generator.from_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.From) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.group_sql": {"fullname": "sqlglot.generator.Generator.group_sql", "modulename": "sqlglot.generator", "qualname": "Generator.group_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Group) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.having_sql": {"fullname": "sqlglot.generator.Generator.having_sql", "modulename": "sqlglot.generator", "qualname": "Generator.having_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Having) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.join_sql": {"fullname": "sqlglot.generator.Generator.join_sql", "modulename": "sqlglot.generator", "qualname": "Generator.join_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Join) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lambda_sql": {"fullname": "sqlglot.generator.Generator.lambda_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lambda_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Lambda,\tarrow_sep: str = '->') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lateral_sql": {"fullname": "sqlglot.generator.Generator.lateral_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lateral_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Lateral) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.limit_sql": {"fullname": "sqlglot.generator.Generator.limit_sql", "modulename": "sqlglot.generator", "qualname": "Generator.limit_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Limit) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.offset_sql": {"fullname": "sqlglot.generator.Generator.offset_sql", "modulename": "sqlglot.generator", "qualname": "Generator.offset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Offset) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.setitem_sql": {"fullname": "sqlglot.generator.Generator.setitem_sql", "modulename": "sqlglot.generator", "qualname": "Generator.setitem_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SetItem) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.set_sql": {"fullname": "sqlglot.generator.Generator.set_sql", "modulename": "sqlglot.generator", "qualname": "Generator.set_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Set) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pragma_sql": {"fullname": "sqlglot.generator.Generator.pragma_sql", "modulename": "sqlglot.generator", "qualname": "Generator.pragma_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Pragma) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lock_sql": {"fullname": "sqlglot.generator.Generator.lock_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lock_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Lock) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.literal_sql": {"fullname": "sqlglot.generator.Generator.literal_sql", "modulename": "sqlglot.generator", "qualname": "Generator.literal_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Literal) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.loaddata_sql": {"fullname": "sqlglot.generator.Generator.loaddata_sql", "modulename": "sqlglot.generator", "qualname": "Generator.loaddata_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LoadData) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.null_sql": {"fullname": "sqlglot.generator.Generator.null_sql", "modulename": "sqlglot.generator", "qualname": "Generator.null_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.boolean_sql": {"fullname": "sqlglot.generator.Generator.boolean_sql", "modulename": "sqlglot.generator", "qualname": "Generator.boolean_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Boolean) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.order_sql": {"fullname": "sqlglot.generator.Generator.order_sql", "modulename": "sqlglot.generator", "qualname": "Generator.order_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Order, flat: bool = False) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cluster_sql": {"fullname": "sqlglot.generator.Generator.cluster_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cluster_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cluster) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.distribute_sql": {"fullname": "sqlglot.generator.Generator.distribute_sql", "modulename": "sqlglot.generator", "qualname": "Generator.distribute_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Distribute) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sort_sql": {"fullname": "sqlglot.generator.Generator.sort_sql", "modulename": "sqlglot.generator", "qualname": "Generator.sort_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Sort) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ordered_sql": {"fullname": "sqlglot.generator.Generator.ordered_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ordered_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Ordered) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.matchrecognize_sql": {"fullname": "sqlglot.generator.Generator.matchrecognize_sql", "modulename": "sqlglot.generator", "qualname": "Generator.matchrecognize_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MatchRecognize) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.query_modifiers": {"fullname": "sqlglot.generator.Generator.query_modifiers", "modulename": "sqlglot.generator", "qualname": "Generator.query_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression, *sqls: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.offset_limit_modifiers": {"fullname": "sqlglot.generator.Generator.offset_limit_modifiers", "modulename": "sqlglot.generator", "qualname": "Generator.offset_limit_modifiers", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Expression,\tfetch: bool,\tlimit: Union[sqlglot.expressions.Fetch, sqlglot.expressions.Limit, NoneType]) -> List[str]:", "funcdef": "def"}, "sqlglot.generator.Generator.after_having_modifiers": {"fullname": "sqlglot.generator.Generator.after_having_modifiers", "modulename": "sqlglot.generator", "qualname": "Generator.after_having_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> List[str]:", "funcdef": "def"}, "sqlglot.generator.Generator.after_limit_modifiers": {"fullname": "sqlglot.generator.Generator.after_limit_modifiers", "modulename": "sqlglot.generator", "qualname": "Generator.after_limit_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> List[str]:", "funcdef": "def"}, "sqlglot.generator.Generator.select_sql": {"fullname": "sqlglot.generator.Generator.select_sql", "modulename": "sqlglot.generator", "qualname": "Generator.select_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Select) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.schema_sql": {"fullname": "sqlglot.generator.Generator.schema_sql", "modulename": "sqlglot.generator", "qualname": "Generator.schema_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Schema) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.schema_columns_sql": {"fullname": "sqlglot.generator.Generator.schema_columns_sql", "modulename": "sqlglot.generator", "qualname": "Generator.schema_columns_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Schema) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.star_sql": {"fullname": "sqlglot.generator.Generator.star_sql", "modulename": "sqlglot.generator", "qualname": "Generator.star_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Star) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.parameter_sql": {"fullname": "sqlglot.generator.Generator.parameter_sql", "modulename": "sqlglot.generator", "qualname": "Generator.parameter_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Parameter) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sessionparameter_sql": {"fullname": "sqlglot.generator.Generator.sessionparameter_sql", "modulename": "sqlglot.generator", "qualname": "Generator.sessionparameter_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SessionParameter) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.placeholder_sql": {"fullname": "sqlglot.generator.Generator.placeholder_sql", "modulename": "sqlglot.generator", "qualname": "Generator.placeholder_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Placeholder) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.subquery_sql": {"fullname": "sqlglot.generator.Generator.subquery_sql", "modulename": "sqlglot.generator", "qualname": "Generator.subquery_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Subquery, sep: str = ' AS ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.qualify_sql": {"fullname": "sqlglot.generator.Generator.qualify_sql", "modulename": "sqlglot.generator", "qualname": "Generator.qualify_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Qualify) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.union_sql": {"fullname": "sqlglot.generator.Generator.union_sql", "modulename": "sqlglot.generator", "qualname": "Generator.union_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Union) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.union_op": {"fullname": "sqlglot.generator.Generator.union_op", "modulename": "sqlglot.generator", "qualname": "Generator.union_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Union) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.unnest_sql": {"fullname": "sqlglot.generator.Generator.unnest_sql", "modulename": "sqlglot.generator", "qualname": "Generator.unnest_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Unnest) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.where_sql": {"fullname": "sqlglot.generator.Generator.where_sql", "modulename": "sqlglot.generator", "qualname": "Generator.where_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Where) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.window_sql": {"fullname": "sqlglot.generator.Generator.window_sql", "modulename": "sqlglot.generator", "qualname": "Generator.window_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Window) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.partition_by_sql": {"fullname": "sqlglot.generator.Generator.partition_by_sql", "modulename": "sqlglot.generator", "qualname": "Generator.partition_by_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Window | sqlglot.expressions.MatchRecognize) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.windowspec_sql": {"fullname": "sqlglot.generator.Generator.windowspec_sql", "modulename": "sqlglot.generator", "qualname": "Generator.windowspec_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.WindowSpec) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.withingroup_sql": {"fullname": "sqlglot.generator.Generator.withingroup_sql", "modulename": "sqlglot.generator", "qualname": "Generator.withingroup_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.WithinGroup) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.between_sql": {"fullname": "sqlglot.generator.Generator.between_sql", "modulename": "sqlglot.generator", "qualname": "Generator.between_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Between) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bracket_sql": {"fullname": "sqlglot.generator.Generator.bracket_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bracket_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Bracket) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.all_sql": {"fullname": "sqlglot.generator.Generator.all_sql", "modulename": "sqlglot.generator", "qualname": "Generator.all_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.All) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.any_sql": {"fullname": "sqlglot.generator.Generator.any_sql", "modulename": "sqlglot.generator", "qualname": "Generator.any_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Any) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.exists_sql": {"fullname": "sqlglot.generator.Generator.exists_sql", "modulename": "sqlglot.generator", "qualname": "Generator.exists_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Exists) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.case_sql": {"fullname": "sqlglot.generator.Generator.case_sql", "modulename": "sqlglot.generator", "qualname": "Generator.case_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Case) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.constraint_sql": {"fullname": "sqlglot.generator.Generator.constraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.constraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Constraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.nextvaluefor_sql": {"fullname": "sqlglot.generator.Generator.nextvaluefor_sql", "modulename": "sqlglot.generator", "qualname": "Generator.nextvaluefor_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NextValueFor) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.extract_sql": {"fullname": "sqlglot.generator.Generator.extract_sql", "modulename": "sqlglot.generator", "qualname": "Generator.extract_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Extract) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.trim_sql": {"fullname": "sqlglot.generator.Generator.trim_sql", "modulename": "sqlglot.generator", "qualname": "Generator.trim_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Trim) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.safeconcat_sql": {"fullname": "sqlglot.generator.Generator.safeconcat_sql", "modulename": "sqlglot.generator", "qualname": "Generator.safeconcat_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SafeConcat) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.check_sql": {"fullname": "sqlglot.generator.Generator.check_sql", "modulename": "sqlglot.generator", "qualname": "Generator.check_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Check) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.foreignkey_sql": {"fullname": "sqlglot.generator.Generator.foreignkey_sql", "modulename": "sqlglot.generator", "qualname": "Generator.foreignkey_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ForeignKey) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.primarykey_sql": {"fullname": "sqlglot.generator.Generator.primarykey_sql", "modulename": "sqlglot.generator", "qualname": "Generator.primarykey_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ForeignKey) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.if_sql": {"fullname": "sqlglot.generator.Generator.if_sql", "modulename": "sqlglot.generator", "qualname": "Generator.if_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.If) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.matchagainst_sql": {"fullname": "sqlglot.generator.Generator.matchagainst_sql", "modulename": "sqlglot.generator", "qualname": "Generator.matchagainst_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MatchAgainst) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"fullname": "sqlglot.generator.Generator.jsonkeyvalue_sql", "modulename": "sqlglot.generator", "qualname": "Generator.jsonkeyvalue_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.JSONKeyValue) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.jsonobject_sql": {"fullname": "sqlglot.generator.Generator.jsonobject_sql", "modulename": "sqlglot.generator", "qualname": "Generator.jsonobject_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.JSONObject) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"fullname": "sqlglot.generator.Generator.openjsoncolumndef_sql", "modulename": "sqlglot.generator", "qualname": "Generator.openjsoncolumndef_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.OpenJSONColumnDef) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.openjson_sql": {"fullname": "sqlglot.generator.Generator.openjson_sql", "modulename": "sqlglot.generator", "qualname": "Generator.openjson_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.OpenJSON) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.in_sql": {"fullname": "sqlglot.generator.Generator.in_sql", "modulename": "sqlglot.generator", "qualname": "Generator.in_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.In) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.in_unnest_op": {"fullname": "sqlglot.generator.Generator.in_unnest_op", "modulename": "sqlglot.generator", "qualname": "Generator.in_unnest_op", "kind": "function", "doc": "

\n", "signature": "(self, unnest: sqlglot.expressions.Unnest) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.interval_sql": {"fullname": "sqlglot.generator.Generator.interval_sql", "modulename": "sqlglot.generator", "qualname": "Generator.interval_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Interval) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.return_sql": {"fullname": "sqlglot.generator.Generator.return_sql", "modulename": "sqlglot.generator", "qualname": "Generator.return_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Return) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.reference_sql": {"fullname": "sqlglot.generator.Generator.reference_sql", "modulename": "sqlglot.generator", "qualname": "Generator.reference_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Reference) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.anonymous_sql": {"fullname": "sqlglot.generator.Generator.anonymous_sql", "modulename": "sqlglot.generator", "qualname": "Generator.anonymous_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Anonymous) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.paren_sql": {"fullname": "sqlglot.generator.Generator.paren_sql", "modulename": "sqlglot.generator", "qualname": "Generator.paren_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Paren) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.neg_sql": {"fullname": "sqlglot.generator.Generator.neg_sql", "modulename": "sqlglot.generator", "qualname": "Generator.neg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Neg) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.not_sql": {"fullname": "sqlglot.generator.Generator.not_sql", "modulename": "sqlglot.generator", "qualname": "Generator.not_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Not) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.alias_sql": {"fullname": "sqlglot.generator.Generator.alias_sql", "modulename": "sqlglot.generator", "qualname": "Generator.alias_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Alias) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.aliases_sql": {"fullname": "sqlglot.generator.Generator.aliases_sql", "modulename": "sqlglot.generator", "qualname": "Generator.aliases_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Aliases) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.attimezone_sql": {"fullname": "sqlglot.generator.Generator.attimezone_sql", "modulename": "sqlglot.generator", "qualname": "Generator.attimezone_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AtTimeZone) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.add_sql": {"fullname": "sqlglot.generator.Generator.add_sql", "modulename": "sqlglot.generator", "qualname": "Generator.add_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Add) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.and_sql": {"fullname": "sqlglot.generator.Generator.and_sql", "modulename": "sqlglot.generator", "qualname": "Generator.and_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.And) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.connector_sql": {"fullname": "sqlglot.generator.Generator.connector_sql", "modulename": "sqlglot.generator", "qualname": "Generator.connector_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Connector, op: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiseand_sql": {"fullname": "sqlglot.generator.Generator.bitwiseand_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiseand_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseAnd) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"fullname": "sqlglot.generator.Generator.bitwiseleftshift_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiseleftshift_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseLeftShift) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwisenot_sql": {"fullname": "sqlglot.generator.Generator.bitwisenot_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwisenot_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseNot) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiseor_sql": {"fullname": "sqlglot.generator.Generator.bitwiseor_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiseor_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseOr) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"fullname": "sqlglot.generator.Generator.bitwiserightshift_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiserightshift_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseRightShift) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwisexor_sql": {"fullname": "sqlglot.generator.Generator.bitwisexor_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwisexor_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseXor) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cast_sql": {"fullname": "sqlglot.generator.Generator.cast_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cast) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.currentdate_sql": {"fullname": "sqlglot.generator.Generator.currentdate_sql", "modulename": "sqlglot.generator", "qualname": "Generator.currentdate_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CurrentDate) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.collate_sql": {"fullname": "sqlglot.generator.Generator.collate_sql", "modulename": "sqlglot.generator", "qualname": "Generator.collate_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Collate) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.command_sql": {"fullname": "sqlglot.generator.Generator.command_sql", "modulename": "sqlglot.generator", "qualname": "Generator.command_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Command) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.comment_sql": {"fullname": "sqlglot.generator.Generator.comment_sql", "modulename": "sqlglot.generator", "qualname": "Generator.comment_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Comment) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"fullname": "sqlglot.generator.Generator.mergetreettlaction_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mergetreettlaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MergeTreeTTLAction) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mergetreettl_sql": {"fullname": "sqlglot.generator.Generator.mergetreettl_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mergetreettl_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MergeTreeTTL) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.transaction_sql": {"fullname": "sqlglot.generator.Generator.transaction_sql", "modulename": "sqlglot.generator", "qualname": "Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Transaction) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.commit_sql": {"fullname": "sqlglot.generator.Generator.commit_sql", "modulename": "sqlglot.generator", "qualname": "Generator.commit_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Commit) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.rollback_sql": {"fullname": "sqlglot.generator.Generator.rollback_sql", "modulename": "sqlglot.generator", "qualname": "Generator.rollback_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Rollback) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.altercolumn_sql": {"fullname": "sqlglot.generator.Generator.altercolumn_sql", "modulename": "sqlglot.generator", "qualname": "Generator.altercolumn_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AlterColumn) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.renametable_sql": {"fullname": "sqlglot.generator.Generator.renametable_sql", "modulename": "sqlglot.generator", "qualname": "Generator.renametable_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RenameTable) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.altertable_sql": {"fullname": "sqlglot.generator.Generator.altertable_sql", "modulename": "sqlglot.generator", "qualname": "Generator.altertable_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AlterTable) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.droppartition_sql": {"fullname": "sqlglot.generator.Generator.droppartition_sql", "modulename": "sqlglot.generator", "qualname": "Generator.droppartition_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DropPartition) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.addconstraint_sql": {"fullname": "sqlglot.generator.Generator.addconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.addconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AddConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.distinct_sql": {"fullname": "sqlglot.generator.Generator.distinct_sql", "modulename": "sqlglot.generator", "qualname": "Generator.distinct_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Distinct) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ignorenulls_sql": {"fullname": "sqlglot.generator.Generator.ignorenulls_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ignorenulls_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.IgnoreNulls) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.respectnulls_sql": {"fullname": "sqlglot.generator.Generator.respectnulls_sql", "modulename": "sqlglot.generator", "qualname": "Generator.respectnulls_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RespectNulls) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.intdiv_sql": {"fullname": "sqlglot.generator.Generator.intdiv_sql", "modulename": "sqlglot.generator", "qualname": "Generator.intdiv_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.IntDiv) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dpipe_sql": {"fullname": "sqlglot.generator.Generator.dpipe_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dpipe_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DPipe) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.safedpipe_sql": {"fullname": "sqlglot.generator.Generator.safedpipe_sql", "modulename": "sqlglot.generator", "qualname": "Generator.safedpipe_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SafeDPipe) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.div_sql": {"fullname": "sqlglot.generator.Generator.div_sql", "modulename": "sqlglot.generator", "qualname": "Generator.div_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Div) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.overlaps_sql": {"fullname": "sqlglot.generator.Generator.overlaps_sql", "modulename": "sqlglot.generator", "qualname": "Generator.overlaps_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Overlaps) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.distance_sql": {"fullname": "sqlglot.generator.Generator.distance_sql", "modulename": "sqlglot.generator", "qualname": "Generator.distance_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Distance) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dot_sql": {"fullname": "sqlglot.generator.Generator.dot_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dot_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Dot) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.eq_sql": {"fullname": "sqlglot.generator.Generator.eq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.eq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.EQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.escape_sql": {"fullname": "sqlglot.generator.Generator.escape_sql", "modulename": "sqlglot.generator", "qualname": "Generator.escape_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Escape) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.glob_sql": {"fullname": "sqlglot.generator.Generator.glob_sql", "modulename": "sqlglot.generator", "qualname": "Generator.glob_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Glob) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.gt_sql": {"fullname": "sqlglot.generator.Generator.gt_sql", "modulename": "sqlglot.generator", "qualname": "Generator.gt_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.GT) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.gte_sql": {"fullname": "sqlglot.generator.Generator.gte_sql", "modulename": "sqlglot.generator", "qualname": "Generator.gte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.GTE) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ilike_sql": {"fullname": "sqlglot.generator.Generator.ilike_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ilike_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ILike) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ilikeany_sql": {"fullname": "sqlglot.generator.Generator.ilikeany_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ilikeany_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ILikeAny) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.is_sql": {"fullname": "sqlglot.generator.Generator.is_sql", "modulename": "sqlglot.generator", "qualname": "Generator.is_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Is) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.like_sql": {"fullname": "sqlglot.generator.Generator.like_sql", "modulename": "sqlglot.generator", "qualname": "Generator.like_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Like) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.likeany_sql": {"fullname": "sqlglot.generator.Generator.likeany_sql", "modulename": "sqlglot.generator", "qualname": "Generator.likeany_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LikeAny) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.similarto_sql": {"fullname": "sqlglot.generator.Generator.similarto_sql", "modulename": "sqlglot.generator", "qualname": "Generator.similarto_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SimilarTo) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lt_sql": {"fullname": "sqlglot.generator.Generator.lt_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lt_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LT) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lte_sql": {"fullname": "sqlglot.generator.Generator.lte_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LTE) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mod_sql": {"fullname": "sqlglot.generator.Generator.mod_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mod_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Mod) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mul_sql": {"fullname": "sqlglot.generator.Generator.mul_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mul_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Mul) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.neq_sql": {"fullname": "sqlglot.generator.Generator.neq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.neq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NEQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.nullsafeeq_sql": {"fullname": "sqlglot.generator.Generator.nullsafeeq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.nullsafeeq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NullSafeEQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.nullsafeneq_sql": {"fullname": "sqlglot.generator.Generator.nullsafeneq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.nullsafeneq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NullSafeNEQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.or_sql": {"fullname": "sqlglot.generator.Generator.or_sql", "modulename": "sqlglot.generator", "qualname": "Generator.or_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Or) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.slice_sql": {"fullname": "sqlglot.generator.Generator.slice_sql", "modulename": "sqlglot.generator", "qualname": "Generator.slice_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Slice) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sub_sql": {"fullname": "sqlglot.generator.Generator.sub_sql", "modulename": "sqlglot.generator", "qualname": "Generator.sub_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Sub) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.trycast_sql": {"fullname": "sqlglot.generator.Generator.trycast_sql", "modulename": "sqlglot.generator", "qualname": "Generator.trycast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.TryCast) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.use_sql": {"fullname": "sqlglot.generator.Generator.use_sql", "modulename": "sqlglot.generator", "qualname": "Generator.use_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Use) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.binary": {"fullname": "sqlglot.generator.Generator.binary", "modulename": "sqlglot.generator", "qualname": "Generator.binary", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Binary, op: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.function_fallback_sql": {"fullname": "sqlglot.generator.Generator.function_fallback_sql", "modulename": "sqlglot.generator", "qualname": "Generator.function_fallback_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Func) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.func": {"fullname": "sqlglot.generator.Generator.func", "modulename": "sqlglot.generator", "qualname": "Generator.func", "kind": "function", "doc": "

\n", "signature": "(\tself,\tname: str,\t*args: Union[str, sqlglot.expressions.Expression, NoneType],\tprefix: str = '(',\tsuffix: str = ')') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.format_args": {"fullname": "sqlglot.generator.Generator.format_args", "modulename": "sqlglot.generator", "qualname": "Generator.format_args", "kind": "function", "doc": "

\n", "signature": "(self, *args: Union[str, sqlglot.expressions.Expression, NoneType]) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.text_width": {"fullname": "sqlglot.generator.Generator.text_width", "modulename": "sqlglot.generator", "qualname": "Generator.text_width", "kind": "function", "doc": "

\n", "signature": "(self, args: Iterable) -> int:", "funcdef": "def"}, "sqlglot.generator.Generator.format_time": {"fullname": "sqlglot.generator.Generator.format_time", "modulename": "sqlglot.generator", "qualname": "Generator.format_time", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> Optional[str]:", "funcdef": "def"}, "sqlglot.generator.Generator.expressions": {"fullname": "sqlglot.generator.Generator.expressions", "modulename": "sqlglot.generator", "qualname": "Generator.expressions", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: Optional[sqlglot.expressions.Expression] = None,\tkey: Optional[str] = None,\tsqls: Optional[List[str]] = None,\tflat: bool = False,\tindent: bool = True,\tsep: str = ', ',\tprefix: str = '') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.op_expressions": {"fullname": "sqlglot.generator.Generator.op_expressions", "modulename": "sqlglot.generator", "qualname": "Generator.op_expressions", "kind": "function", "doc": "

\n", "signature": "(\tself,\top: str,\texpression: sqlglot.expressions.Expression,\tflat: bool = False) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.naked_property": {"fullname": "sqlglot.generator.Generator.naked_property", "modulename": "sqlglot.generator", "qualname": "Generator.naked_property", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Property) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.set_operation": {"fullname": "sqlglot.generator.Generator.set_operation", "modulename": "sqlglot.generator", "qualname": "Generator.set_operation", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression, op: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tag_sql": {"fullname": "sqlglot.generator.Generator.tag_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tag_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Tag) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.token_sql": {"fullname": "sqlglot.generator.Generator.token_sql", "modulename": "sqlglot.generator", "qualname": "Generator.token_sql", "kind": "function", "doc": "

\n", "signature": "(self, token_type: sqlglot.tokens.TokenType) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"fullname": "sqlglot.generator.Generator.userdefinedfunction_sql", "modulename": "sqlglot.generator", "qualname": "Generator.userdefinedfunction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.UserDefinedFunction) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.joinhint_sql": {"fullname": "sqlglot.generator.Generator.joinhint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.joinhint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.JoinHint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.kwarg_sql": {"fullname": "sqlglot.generator.Generator.kwarg_sql", "modulename": "sqlglot.generator", "qualname": "Generator.kwarg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Kwarg) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.when_sql": {"fullname": "sqlglot.generator.Generator.when_sql", "modulename": "sqlglot.generator", "qualname": "Generator.when_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.When) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.merge_sql": {"fullname": "sqlglot.generator.Generator.merge_sql", "modulename": "sqlglot.generator", "qualname": "Generator.merge_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Merge) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tochar_sql": {"fullname": "sqlglot.generator.Generator.tochar_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tochar_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ToChar) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dictproperty_sql": {"fullname": "sqlglot.generator.Generator.dictproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dictproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DictProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dictrange_sql": {"fullname": "sqlglot.generator.Generator.dictrange_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dictrange_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DictRange) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dictsubproperty_sql": {"fullname": "sqlglot.generator.Generator.dictsubproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dictsubproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DictSubProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.oncluster_sql": {"fullname": "sqlglot.generator.Generator.oncluster_sql", "modulename": "sqlglot.generator", "qualname": "Generator.oncluster_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.OnCluster) -> str:", "funcdef": "def"}, "sqlglot.generator.cached_generator": {"fullname": "sqlglot.generator.cached_generator", "modulename": "sqlglot.generator", "qualname": "cached_generator", "kind": "function", "doc": "

Returns a cached generator.

\n", "signature": "(\tcache: Optional[Dict[int, str]] = None) -> Callable[[sqlglot.expressions.Expression], str]:", "funcdef": "def"}, "sqlglot.helper": {"fullname": "sqlglot.helper", "modulename": "sqlglot.helper", "kind": "module", "doc": "

\n"}, "sqlglot.helper.AutoName": {"fullname": "sqlglot.helper.AutoName", "modulename": "sqlglot.helper", "qualname": "AutoName", "kind": "class", "doc": "

This is used for creating enum classes where auto() is the string form of the corresponding value's name.

\n", "bases": "enum.Enum"}, "sqlglot.helper.seq_get": {"fullname": "sqlglot.helper.seq_get", "modulename": "sqlglot.helper", "qualname": "seq_get", "kind": "function", "doc": "

Returns the value in seq at position index, or None if index is out of bounds.

\n", "signature": "(seq: Sequence[~T], index: int) -> Optional[~T]:", "funcdef": "def"}, "sqlglot.helper.ensure_list": {"fullname": "sqlglot.helper.ensure_list", "modulename": "sqlglot.helper", "qualname": "ensure_list", "kind": "function", "doc": "

Ensures that a value is a list, otherwise casts or wraps it into one.

\n\n
Arguments:
\n\n
    \n
  • value: the value of interest.
  • \n
\n\n
Returns:
\n\n
\n

The value cast as a list if it's a list or a tuple, or else the value wrapped in a list.

\n
\n", "signature": "(value):", "funcdef": "def"}, "sqlglot.helper.ensure_collection": {"fullname": "sqlglot.helper.ensure_collection", "modulename": "sqlglot.helper", "qualname": "ensure_collection", "kind": "function", "doc": "

Ensures that a value is a collection (excluding str and bytes), otherwise wraps it into a list.

\n\n
Arguments:
\n\n
    \n
  • value: the value of interest.
  • \n
\n\n
Returns:
\n\n
\n

The value if it's a collection, or else the value wrapped in a list.

\n
\n", "signature": "(value):", "funcdef": "def"}, "sqlglot.helper.csv": {"fullname": "sqlglot.helper.csv", "modulename": "sqlglot.helper", "qualname": "csv", "kind": "function", "doc": "

Formats any number of string arguments as CSV.

\n\n
Arguments:
\n\n
    \n
  • args: the string arguments to format.
  • \n
  • sep: the argument separator.
  • \n
\n\n
Returns:
\n\n
\n

The arguments formatted as a CSV string.

\n
\n", "signature": "(*args: str, sep: str = ', ') -> str:", "funcdef": "def"}, "sqlglot.helper.subclasses": {"fullname": "sqlglot.helper.subclasses", "modulename": "sqlglot.helper", "qualname": "subclasses", "kind": "function", "doc": "

Returns all subclasses for a collection of classes, possibly excluding some of them.

\n\n
Arguments:
\n\n
    \n
  • module_name: the name of the module to search for subclasses in.
  • \n
  • classes: class(es) we want to find the subclasses of.
  • \n
  • exclude: class(es) we want to exclude from the returned list.
  • \n
\n\n
Returns:
\n\n
\n

The target subclasses.

\n
\n", "signature": "(\tmodule_name: str,\tclasses: Union[Type, Tuple[Type, ...]],\texclude: Union[Type, Tuple[Type, ...]] = ()) -> List[Type]:", "funcdef": "def"}, "sqlglot.helper.apply_index_offset": {"fullname": "sqlglot.helper.apply_index_offset", "modulename": "sqlglot.helper", "qualname": "apply_index_offset", "kind": "function", "doc": "

Applies an offset to a given integer literal expression.

\n\n
Arguments:
\n\n
    \n
  • this: the target of the index
  • \n
  • expressions: the expression the offset will be applied to, wrapped in a list.
  • \n
  • offset: the offset that will be applied.
  • \n
\n\n
Returns:
\n\n
\n

The original expression with the offset applied to it, wrapped in a list. If the provided\n expressions argument contains more than one expression, it's returned unaffected.

\n
\n", "signature": "(\tthis: sqlglot.expressions.Expression,\texpressions: List[Optional[~E]],\toffset: int) -> List[Optional[~E]]:", "funcdef": "def"}, "sqlglot.helper.camel_to_snake_case": {"fullname": "sqlglot.helper.camel_to_snake_case", "modulename": "sqlglot.helper", "qualname": "camel_to_snake_case", "kind": "function", "doc": "

Converts name from camelCase to snake_case and returns the result.

\n", "signature": "(name: str) -> str:", "funcdef": "def"}, "sqlglot.helper.while_changing": {"fullname": "sqlglot.helper.while_changing", "modulename": "sqlglot.helper", "qualname": "while_changing", "kind": "function", "doc": "

Applies a transformation to a given expression until a fixed point is reached.

\n\n
Arguments:
\n\n
    \n
  • expression: the expression to be transformed.
  • \n
  • func: the transformation to be applied.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
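A hedged sketch; simplify is used here only as an illustrative transformation that can safely be re-applied, and the result is not asserted:

>>> import sqlglot
>>> from sqlglot.optimizer.simplify import simplify
>>> expr = while_changing(sqlglot.parse_one("TRUE AND TRUE"), simplify)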
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tfunc: Callable[[sqlglot.expressions.Expression], ~E]) -> ~E:", "funcdef": "def"}, "sqlglot.helper.tsort": {"fullname": "sqlglot.helper.tsort", "modulename": "sqlglot.helper", "qualname": "tsort", "kind": "function", "doc": "

Sorts a given directed acyclic graph in topological order.

\n\n
Arguments:
\n\n
    \n
  • dag: the graph to be sorted.
  • \n
\n\n
Returns:
\n\n
\n

A list that contains all of the graph's nodes in topological order.

\n
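A hedged sketch; reading each node's list as its dependencies is an assumption here, so the exact ordering is not asserted:

>>> order = tsort({"a": ["b"], "b": ["c"], "c": []})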
\n", "signature": "(dag: Dict[~T, List[~T]]) -> List[~T]:", "funcdef": "def"}, "sqlglot.helper.open_file": {"fullname": "sqlglot.helper.open_file", "modulename": "sqlglot.helper", "qualname": "open_file", "kind": "function", "doc": "

Open a file that may be compressed as gzip and return it in universal newline mode.

\n", "signature": "(file_name: str) -> <class 'TextIO'>:", "funcdef": "def"}, "sqlglot.helper.csv_reader": {"fullname": "sqlglot.helper.csv_reader", "modulename": "sqlglot.helper", "qualname": "csv_reader", "kind": "function", "doc": "

Returns a csv reader given the expression READ_CSV(name, ['delimiter', '|', ...]).

\n\n
Arguments:
\n\n
    \n
  • read_csv: a ReadCSV function call
  • \n
\n\n
Yields:
\n\n
\n

A python csv reader.

\n
\n", "signature": "(read_csv: sqlglot.expressions.ReadCSV) -> Any:", "funcdef": "def"}, "sqlglot.helper.find_new_name": {"fullname": "sqlglot.helper.find_new_name", "modulename": "sqlglot.helper", "qualname": "find_new_name", "kind": "function", "doc": "

Searches for a new name.

\n\n
Arguments:
\n\n
    \n
  • taken: a collection of taken names.
  • \n
  • base: base name to alter.
  • \n
\n\n
Returns:
\n\n
\n

The new, available name.

\n
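A hedged sketch; only the contract above (an available name derived from the base) is relied on, not the exact suffix format:

>>> fresh = find_new_name({"x", "y"}, "x")  # some name that is not in {"x", "y"}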
\n", "signature": "(taken: Collection[str], base: str) -> str:", "funcdef": "def"}, "sqlglot.helper.name_sequence": {"fullname": "sqlglot.helper.name_sequence", "modulename": "sqlglot.helper", "qualname": "name_sequence", "kind": "function", "doc": "

Returns a name generator given a prefix (e.g. a0, a1, a2, ... if the prefix is \"a\").
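Example (a minimal sketch; the a0, a1, a2 pattern is the one described above):

>>> names = name_sequence("a")
>>> names(), names(), names()
('a0', 'a1', 'a2')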

\n", "signature": "(prefix: str) -> Callable[[], str]:", "funcdef": "def"}, "sqlglot.helper.object_to_dict": {"fullname": "sqlglot.helper.object_to_dict", "modulename": "sqlglot.helper", "qualname": "object_to_dict", "kind": "function", "doc": "

Returns a dictionary created from an object's attributes.

\n", "signature": "(obj: Any, **kwargs) -> Dict:", "funcdef": "def"}, "sqlglot.helper.split_num_words": {"fullname": "sqlglot.helper.split_num_words", "modulename": "sqlglot.helper", "qualname": "split_num_words", "kind": "function", "doc": "

Perform a split on a value and return N words as a result, with None used for words that don't exist.

\n\n
Arguments:
\n\n
    \n
  • value: the value to be split.
  • \n
  • sep: the value to use to split on.
  • \n
  • min_num_words: the minimum number of words that are going to be in the result.
  • \n
  • fill_from_start: indicates whether None values should be inserted at the start or the end of the list.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> split_num_words("db.table", ".", 3)\n[None, 'db', 'table']\n>>> split_num_words("db.table", ".", 3, fill_from_start=False)\n['db', 'table', None]\n>>> split_num_words("db.table", ".", 1)\n['db', 'table']\n
\n
\n
\n\n
Returns:
\n\n
\n

The list of words returned by split, possibly augmented by a number of None values.

\n
\n", "signature": "(\tvalue: str,\tsep: str,\tmin_num_words: int,\tfill_from_start: bool = True) -> List[Optional[str]]:", "funcdef": "def"}, "sqlglot.helper.is_iterable": {"fullname": "sqlglot.helper.is_iterable", "modulename": "sqlglot.helper", "qualname": "is_iterable", "kind": "function", "doc": "

Checks if the value is an iterable, excluding the types str and bytes.

\n\n
Examples:
\n\n
\n
\n
>>> is_iterable([1,2])\nTrue\n>>> is_iterable("test")\nFalse\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • value: the value to check if it is an iterable.
  • \n
\n\n
Returns:
\n\n
\n

A bool value indicating if it is an iterable.

\n
\n", "signature": "(value: Any) -> bool:", "funcdef": "def"}, "sqlglot.helper.flatten": {"fullname": "sqlglot.helper.flatten", "modulename": "sqlglot.helper", "qualname": "flatten", "kind": "function", "doc": "

Flattens an iterable that can contain both iterable and non-iterable elements. Objects of\ntype str and bytes are not regarded as iterables.

\n\n
Examples:
\n\n
\n
\n
>>> list(flatten([[1, 2], 3, {4}, (5, "bla")]))\n[1, 2, 3, 4, 5, 'bla']\n>>> list(flatten([1, 2, 3]))\n[1, 2, 3]\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • values: the value to be flattened.
  • \n
\n\n
Yields:
\n\n
\n

Non-iterable elements in values.

\n
\n", "signature": "(values: Iterable[Union[Iterable[Any], Any]]) -> Iterator[Any]:", "funcdef": "def"}, "sqlglot.helper.dict_depth": {"fullname": "sqlglot.helper.dict_depth", "modulename": "sqlglot.helper", "qualname": "dict_depth", "kind": "function", "doc": "

Get the nesting depth of a dictionary.

\n\n
For example:
\n\n
\n
\n
>>> dict_depth(None)\n0\n>>> dict_depth({})\n1\n>>> dict_depth({"a": "b"})\n1\n>>> dict_depth({"a": {}})\n2\n>>> dict_depth({"a": {"b": {}}})\n3\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • d (dict): dictionary
  • \n
\n\n
Returns:
\n\n
\n

int: depth

\n
\n", "signature": "(d: Dict) -> int:", "funcdef": "def"}, "sqlglot.helper.first": {"fullname": "sqlglot.helper.first", "modulename": "sqlglot.helper", "qualname": "first", "kind": "function", "doc": "

Returns the first element from an iterable.

\n\n

Useful for sets.
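Example (a minimal sketch with a single-element set, so the result is unambiguous):

>>> first({"only"})
'only'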

\n", "signature": "(it: Iterable[~T]) -> ~T:", "funcdef": "def"}, "sqlglot.helper.case_sensitive": {"fullname": "sqlglot.helper.case_sensitive", "modulename": "sqlglot.helper", "qualname": "case_sensitive", "kind": "function", "doc": "

Checks if text contains any case-sensitive characters, depending on the dialect.

\n", "signature": "(\ttext: str,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType]) -> bool:", "funcdef": "def"}, "sqlglot.helper.should_identify": {"fullname": "sqlglot.helper.should_identify", "modulename": "sqlglot.helper", "qualname": "should_identify", "kind": "function", "doc": "

Checks if text should be identified given an identify option.

\n\n
Arguments:
\n\n
    \n
  • text: the text to check.
  • \n
  • identify: \"always\" or True: always returns true.\n\"safe\": true if there is no uppercase or lowercase character in text, depending on dialect.
  • \n
  • dialect: the dialect to use in order to decide whether a text should be identified.
  • \n
\n\n
Returns:
\n\n
\n

Whether or not a string should be identified.

\n
\n", "signature": "(\ttext: str,\tidentify: str | bool,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> bool:", "funcdef": "def"}, "sqlglot.lineage": {"fullname": "sqlglot.lineage", "modulename": "sqlglot.lineage", "kind": "module", "doc": "

\n"}, "sqlglot.lineage.Node": {"fullname": "sqlglot.lineage.Node", "modulename": "sqlglot.lineage", "qualname": "Node", "kind": "class", "doc": "

\n"}, "sqlglot.lineage.Node.__init__": {"fullname": "sqlglot.lineage.Node.__init__", "modulename": "sqlglot.lineage", "qualname": "Node.__init__", "kind": "function", "doc": "

\n", "signature": "(\tname: str,\texpression: sqlglot.expressions.Expression,\tsource: sqlglot.expressions.Expression,\tdownstream: List[sqlglot.lineage.Node] = <factory>,\talias: str = '')"}, "sqlglot.lineage.Node.walk": {"fullname": "sqlglot.lineage.Node.walk", "modulename": "sqlglot.lineage", "qualname": "Node.walk", "kind": "function", "doc": "

\n", "signature": "(self) -> Iterator[sqlglot.lineage.Node]:", "funcdef": "def"}, "sqlglot.lineage.Node.to_html": {"fullname": "sqlglot.lineage.Node.to_html", "modulename": "sqlglot.lineage", "qualname": "Node.to_html", "kind": "function", "doc": "

\n", "signature": "(self, **opts) -> sqlglot.lineage.LineageHTML:", "funcdef": "def"}, "sqlglot.lineage.lineage": {"fullname": "sqlglot.lineage.lineage", "modulename": "sqlglot.lineage", "qualname": "lineage", "kind": "function", "doc": "

Build the lineage graph for a column of a SQL query.

\n\n
Arguments:
\n\n
    \n
  • column: The column to build the lineage for.
  • \n
  • sql: The SQL string or expression.
  • \n
  • schema: The schema of tables.
  • \n
  • sources: A mapping of queries which will be used to continue building lineage.
  • \n
  • dialect: The dialect of input SQL.
  • \n
  • **kwargs: Qualification optimizer kwargs.
  • \n
\n\n
Returns:
\n\n
\n

A lineage node.

\n
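A hedged usage sketch; the query and schema are illustrative and the resulting node names are not asserted:

>>> from sqlglot.lineage import lineage
>>> node = lineage("a", "SELECT a FROM (SELECT a FROM x) AS y", schema={"x": {"a": "INT"}})
>>> names = [n.name for n in node.walk()]  # walk the lineage graph downstream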
\n", "signature": "(\tcolumn: str | sqlglot.expressions.Column,\tsql: str | sqlglot.expressions.Expression,\tschema: Union[Dict, sqlglot.schema.Schema, NoneType] = None,\tsources: Optional[Dict[str, str | sqlglot.expressions.Subqueryable]] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**kwargs) -> sqlglot.lineage.Node:", "funcdef": "def"}, "sqlglot.lineage.LineageHTML": {"fullname": "sqlglot.lineage.LineageHTML", "modulename": "sqlglot.lineage", "qualname": "LineageHTML", "kind": "class", "doc": "

Node to HTML generator using vis.js.

\n\n

https://visjs.github.io/vis-network/docs/network/

\n"}, "sqlglot.lineage.LineageHTML.__init__": {"fullname": "sqlglot.lineage.LineageHTML.__init__", "modulename": "sqlglot.lineage", "qualname": "LineageHTML.__init__", "kind": "function", "doc": "

\n", "signature": "(\tnode: sqlglot.lineage.Node,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\timports: bool = True,\t**opts: Any)"}, "sqlglot.optimizer": {"fullname": "sqlglot.optimizer", "modulename": "sqlglot.optimizer", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.annotate_types": {"fullname": "sqlglot.optimizer.annotate_types", "modulename": "sqlglot.optimizer.annotate_types", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.annotate_types.annotate_types": {"fullname": "sqlglot.optimizer.annotate_types.annotate_types", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "annotate_types", "kind": "function", "doc": "

Infers the types of an expression, annotating its AST accordingly.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> schema = {"y": {"cola": "SMALLINT"}}\n>>> sql = "SELECT x.cola + 2.5 AS cola FROM (SELECT y.cola AS cola FROM y AS y) AS x"\n>>> annotated_expr = annotate_types(sqlglot.parse_one(sql), schema=schema)\n>>> annotated_expr.expressions[0].type.this  # Get the type of "x.cola + 2.5 AS cola"\n<Type.DOUBLE: 'DOUBLE'>\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: Expression to annotate.
  • \n
  • schema: Database schema.
  • \n
  • annotators: Maps expression type to corresponding annotation function.
  • \n
  • coerces_to: Maps expression type to set of types that it can be coerced into.
  • \n
\n\n
Returns:
\n\n
\n

The expression annotated with types.

\n
\n", "signature": "(\texpression: ~E,\tschema: Union[Dict, sqlglot.schema.Schema, NoneType] = None,\tannotators: Optional[Dict[Type[~E], Callable[[sqlglot.optimizer.annotate_types.TypeAnnotator, ~E], ~E]]] = None,\tcoerces_to: Optional[Dict[sqlglot.expressions.DataType.Type, Set[sqlglot.expressions.DataType.Type]]] = None) -> ~E:", "funcdef": "def"}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"fullname": "sqlglot.optimizer.annotate_types.TypeAnnotator", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "TypeAnnotator", "kind": "class", "doc": "

\n"}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"fullname": "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "TypeAnnotator.__init__", "kind": "function", "doc": "

\n", "signature": "(\tschema: sqlglot.schema.Schema,\tannotators: Optional[Dict[Type[~E], Callable[[sqlglot.optimizer.annotate_types.TypeAnnotator, ~E], ~E]]] = None,\tcoerces_to: Optional[Dict[sqlglot.expressions.DataType.Type, Set[sqlglot.expressions.DataType.Type]]] = None)"}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"fullname": "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "TypeAnnotator.annotate", "kind": "function", "doc": "

\n", "signature": "(self, expression: ~E) -> ~E:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize": {"fullname": "sqlglot.optimizer.canonicalize", "modulename": "sqlglot.optimizer.canonicalize", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.canonicalize.canonicalize": {"fullname": "sqlglot.optimizer.canonicalize.canonicalize", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "canonicalize", "kind": "function", "doc": "

Converts a sql expression into a standard form.

\n\n

This method relies on annotate_types because many of the\nconversions rely on type inference.

\n\n
Arguments:
\n\n
    \n
  • expression: The expression to canonicalize.
  • \n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"fullname": "sqlglot.optimizer.canonicalize.add_text_to_concat", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "add_text_to_concat", "kind": "function", "doc": "

\n", "signature": "(node: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.coerce_type": {"fullname": "sqlglot.optimizer.canonicalize.coerce_type", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "coerce_type", "kind": "function", "doc": "

\n", "signature": "(node: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"fullname": "sqlglot.optimizer.canonicalize.remove_redundant_casts", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "remove_redundant_casts", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"fullname": "sqlglot.optimizer.canonicalize.ensure_bool_predicates", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "ensure_bool_predicates", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.eliminate_ctes": {"fullname": "sqlglot.optimizer.eliminate_ctes", "modulename": "sqlglot.optimizer.eliminate_ctes", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"fullname": "sqlglot.optimizer.eliminate_ctes.eliminate_ctes", "modulename": "sqlglot.optimizer.eliminate_ctes", "qualname": "eliminate_ctes", "kind": "function", "doc": "

Remove unused CTEs from an expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "WITH y AS (SELECT a FROM x) SELECT a FROM z"\n>>> expression = sqlglot.parse_one(sql)\n>>> eliminate_ctes(expression).sql()\n'SELECT a FROM z'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.eliminate_joins": {"fullname": "sqlglot.optimizer.eliminate_joins", "modulename": "sqlglot.optimizer.eliminate_joins", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"fullname": "sqlglot.optimizer.eliminate_joins.eliminate_joins", "modulename": "sqlglot.optimizer.eliminate_joins", "qualname": "eliminate_joins", "kind": "function", "doc": "

Remove unused joins from an expression.

\n\n

This only removes joins when we know that the join condition doesn't produce duplicate rows.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "SELECT x.a FROM x LEFT JOIN (SELECT DISTINCT y.b FROM y) AS y ON x.b = y.b"\n>>> expression = sqlglot.parse_one(sql)\n>>> eliminate_joins(expression).sql()\n'SELECT x.a FROM x'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.eliminate_joins.join_condition": {"fullname": "sqlglot.optimizer.eliminate_joins.join_condition", "modulename": "sqlglot.optimizer.eliminate_joins", "qualname": "join_condition", "kind": "function", "doc": "

Extract the join condition from a join expression.

\n\n
Arguments:
\n\n
    \n
  • join (exp.Join)
  • \n
\n\n
Returns:
\n\n
\n

tuple[list[str], list[str], exp.Expression]:\n Tuple of (source key, join key, remaining predicate)

\n
\n", "signature": "(join):", "funcdef": "def"}, "sqlglot.optimizer.eliminate_subqueries": {"fullname": "sqlglot.optimizer.eliminate_subqueries", "modulename": "sqlglot.optimizer.eliminate_subqueries", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"fullname": "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries", "modulename": "sqlglot.optimizer.eliminate_subqueries", "qualname": "eliminate_subqueries", "kind": "function", "doc": "

Rewrite derived tables as CTEs, deduplicating if possible.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y")\n>>> eliminate_subqueries(expression).sql()\n'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y'\n
\n
\n
\n\n
This also deduplicates common subqueries:
\n\n
\n
\n
>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y CROSS JOIN (SELECT * FROM x) AS z")\n>>> eliminate_subqueries(expression).sql()\n'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y CROSS JOIN y AS z'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.isolate_table_selects": {"fullname": "sqlglot.optimizer.isolate_table_selects", "modulename": "sqlglot.optimizer.isolate_table_selects", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"fullname": "sqlglot.optimizer.isolate_table_selects.isolate_table_selects", "modulename": "sqlglot.optimizer.isolate_table_selects", "qualname": "isolate_table_selects", "kind": "function", "doc": "

\n", "signature": "(expression, schema=None):", "funcdef": "def"}, "sqlglot.optimizer.merge_subqueries": {"fullname": "sqlglot.optimizer.merge_subqueries", "modulename": "sqlglot.optimizer.merge_subqueries", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"fullname": "sqlglot.optimizer.merge_subqueries.merge_subqueries", "modulename": "sqlglot.optimizer.merge_subqueries", "qualname": "merge_subqueries", "kind": "function", "doc": "

Rewrite sqlglot AST to merge derived tables into the outer query.

\n\n

This also merges CTEs if they are selected from only once.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) CROSS JOIN y")\n>>> merge_subqueries(expression).sql()\n'SELECT x.a FROM x CROSS JOIN y'\n
\n
\n
\n\n

If leave_tables_isolated is True, this will not merge inner queries into outer\nqueries if it would result in multiple table selects in a single query:

\n\n
\n
\n
\n

>>> expression = sqlglot.parse_one(\"SELECT a FROM (SELECT x.a FROM x) CROSS JOIN y\")\n>>> merge_subqueries(expression, leave_tables_isolated=True).sql()\n'SELECT a FROM (SELECT x.a FROM x) CROSS JOIN y'

\n
\n
\n
\n\n

Inspired by https://dev.mysql.com/doc/refman/8.0/en/derived-table-optimization.html

\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
  • leave_tables_isolated (bool): if True, do not merge inner queries when doing so would result in multiple table selects in a single query.
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression, leave_tables_isolated=False):", "funcdef": "def"}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"fullname": "sqlglot.optimizer.merge_subqueries.merge_ctes", "modulename": "sqlglot.optimizer.merge_subqueries", "qualname": "merge_ctes", "kind": "function", "doc": "

\n", "signature": "(expression, leave_tables_isolated=False):", "funcdef": "def"}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"fullname": "sqlglot.optimizer.merge_subqueries.merge_derived_tables", "modulename": "sqlglot.optimizer.merge_subqueries", "qualname": "merge_derived_tables", "kind": "function", "doc": "

\n", "signature": "(expression, leave_tables_isolated=False):", "funcdef": "def"}, "sqlglot.optimizer.normalize": {"fullname": "sqlglot.optimizer.normalize", "modulename": "sqlglot.optimizer.normalize", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.normalize.normalize": {"fullname": "sqlglot.optimizer.normalize.normalize", "modulename": "sqlglot.optimizer.normalize", "qualname": "normalize", "kind": "function", "doc": "

Rewrite sqlglot AST into conjunctive normal form or disjunctive normal form.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("(x AND y) OR z")\n>>> normalize(expression, dnf=False).sql()\n'(x OR z) AND (y OR z)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: expression to normalize
  • \n
  • dnf: rewrite in disjunctive normal form instead.
  • \n
  • max_distance (int): the maximal estimated distance from cnf/dnf to attempt conversion
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: normalized expression

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tdnf: bool = False,\tmax_distance: int = 128):", "funcdef": "def"}, "sqlglot.optimizer.normalize.normalized": {"fullname": "sqlglot.optimizer.normalize.normalized", "modulename": "sqlglot.optimizer.normalize", "qualname": "normalized", "kind": "function", "doc": "

\n", "signature": "(expression, dnf=False):", "funcdef": "def"}, "sqlglot.optimizer.normalize.normalization_distance": {"fullname": "sqlglot.optimizer.normalize.normalization_distance", "modulename": "sqlglot.optimizer.normalize", "qualname": "normalization_distance", "kind": "function", "doc": "

The difference in the number of predicates between the current expression and the normalized form.

\n\n

This is used as an estimate of the cost of the conversion which is exponential in complexity.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("(a AND b) OR (c AND d)")\n>>> normalization_distance(expression)\n4\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to compute distance
  • \n
  • dnf (bool): compute to dnf distance instead
  • \n
\n\n
Returns:
\n\n
\n

int: difference

\n
\n", "signature": "(expression, dnf=False):", "funcdef": "def"}, "sqlglot.optimizer.normalize.distributive_law": {"fullname": "sqlglot.optimizer.normalize.distributive_law", "modulename": "sqlglot.optimizer.normalize", "qualname": "distributive_law", "kind": "function", "doc": "

x OR (y AND z) -> (x OR y) AND (x OR z)\n(x AND y) OR (y AND z) -> (x OR y) AND (x OR z) AND (y OR y) AND (y OR z)

\n", "signature": "(expression, dnf, max_distance, generate):", "funcdef": "def"}, "sqlglot.optimizer.normalize_identifiers": {"fullname": "sqlglot.optimizer.normalize_identifiers", "modulename": "sqlglot.optimizer.normalize_identifiers", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"fullname": "sqlglot.optimizer.normalize_identifiers.normalize_identifiers", "modulename": "sqlglot.optimizer.normalize_identifiers", "qualname": "normalize_identifiers", "kind": "function", "doc": "

Normalize all unquoted identifiers to either lower or upper case, depending on\nthe dialect. This essentially makes those identifiers case-insensitive.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one('SELECT Bar.A AS A FROM "Foo".Bar')\n>>> normalize_identifiers(expression).sql()\n'SELECT bar.a AS a FROM "Foo".bar'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: The expression to transform.
  • \n
  • dialect: The dialect to use in order to decide how to normalize identifiers.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: ~E,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> ~E:", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins": {"fullname": "sqlglot.optimizer.optimize_joins", "modulename": "sqlglot.optimizer.optimize_joins", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"fullname": "sqlglot.optimizer.optimize_joins.optimize_joins", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "optimize_joins", "kind": "function", "doc": "

Removes cross joins if possible and reorders joins based on predicate dependencies.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> optimize_joins(parse_one("SELECT * FROM x CROSS JOIN y JOIN z ON x.a = z.a AND y.a = z.a")).sql()\n'SELECT * FROM x JOIN z ON x.a = z.a AND TRUE JOIN y ON y.a = z.a'\n
\n
\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"fullname": "sqlglot.optimizer.optimize_joins.reorder_joins", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "reorder_joins", "kind": "function", "doc": "

Reorder joins by topological sort order based on predicate references.

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins.normalize": {"fullname": "sqlglot.optimizer.optimize_joins.normalize", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "normalize", "kind": "function", "doc": "

Remove INNER and OUTER from joins as they are optional.

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins.other_table_names": {"fullname": "sqlglot.optimizer.optimize_joins.other_table_names", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "other_table_names", "kind": "function", "doc": "

\n", "signature": "(join, exclude):", "funcdef": "def"}, "sqlglot.optimizer.optimizer": {"fullname": "sqlglot.optimizer.optimizer", "modulename": "sqlglot.optimizer.optimizer", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.optimizer.optimize": {"fullname": "sqlglot.optimizer.optimizer.optimize", "modulename": "sqlglot.optimizer.optimizer", "qualname": "optimize", "kind": "function", "doc": "

Rewrite a sqlglot AST into an optimized form.

\n\n
Arguments:
\n\n
    \n
  • expression: expression to optimize
  • \n
  • schema: database schema.\nThis can either be an instance of sqlglot.optimizer.Schema or a mapping in one of\nthe following forms:\n 1. {table: {col: type}}\n 2. {db: {table: {col: type}}}\n 3. {catalog: {db: {table: {col: type}}}}\nIf no schema is provided then the default schema defined at sqlglot.schema will be used.
  • \n
  • db: specify the default database, as might be set by a USE DATABASE db statement
  • \n
  • catalog: specify the default catalog, as might be set by a USE CATALOG c statement
  • \n
  • dialect: The dialect to parse the sql string.
  • \n
  • rules: sequence of optimizer rules to use.\nMany of the rules require tables and columns to be qualified.\nDo not remove qualify from the sequence of rules unless you know what you're doing!
  • \n
  • *kwargs: If a rule has a keyword argument with the same name in *kwargs, it will be passed in.
  • \n
\n\n
Returns:
\n\n
\n

The optimized expression.

\n
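A hedged usage sketch; the schema and query are illustrative, and the expected output mirrors the qualify example elsewhere in this module:

>>> import sqlglot
>>> from sqlglot.optimizer import optimize
>>> optimize(sqlglot.parse_one("SELECT a FROM x"), schema={"x": {"a": "INT"}}).sql()
'SELECT "x"."a" AS "a" FROM "x" AS "x"'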
\n", "signature": "(\texpression: str | sqlglot.expressions.Expression,\tschema: Union[dict, sqlglot.schema.Schema, NoneType] = None,\tdb: Optional[str] = None,\tcatalog: Optional[str] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\trules: Sequence[Callable] = (<function qualify at 0x7f5e613d37f0>, <function pushdown_projections at 0x7f5e613d31c0>, <function normalize at 0x7f5e614032e0>, <function unnest_subqueries at 0x7f5e613d3be0>, <function pushdown_predicates at 0x7f5e613d2050>, <function optimize_joins at 0x7f5e613d1cf0>, <function eliminate_subqueries at 0x7f5e613d0ee0>, <function merge_subqueries at 0x7f5e613d0f70>, <function eliminate_joins at 0x7f5e614031c0>, <function eliminate_ctes at 0x7f5e614030a0>, <function quote_identifiers at 0x7f5e613d3130>, <function annotate_types at 0x7f5e61453520>, <function canonicalize at 0x7f5e61402b00>, <function simplify at 0x7f5e61403490>),\t**kwargs) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates": {"fullname": "sqlglot.optimizer.pushdown_predicates", "modulename": "sqlglot.optimizer.pushdown_predicates", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown_predicates", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown_predicates", "kind": "function", "doc": "

Rewrite sqlglot AST to push down predicates into FROMs and JOINs.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a FROM x AS x) AS y WHERE y.a = 1"\n>>> expression = sqlglot.parse_one(sql)\n>>> pushdown_predicates(expression).sql()\n'SELECT y.a AS a FROM (SELECT x.a AS a FROM x AS x WHERE x.a = 1) AS y WHERE TRUE'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown", "kind": "function", "doc": "

\n", "signature": "(condition, sources, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown_cnf", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown_cnf", "kind": "function", "doc": "

If the predicates are in CNF like form, we can simply replace each block in the parent.

\n", "signature": "(predicates, scope, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown_dnf", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown_dnf", "kind": "function", "doc": "

If the predicates are in DNF form, we can only push down conditions that are in all blocks.\nAdditionally, we can't remove predicates from their original form.

\n", "signature": "(predicates, scope, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"fullname": "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "nodes_for_predicate", "kind": "function", "doc": "

\n", "signature": "(predicate, sources, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"fullname": "sqlglot.optimizer.pushdown_predicates.replace_aliases", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "replace_aliases", "kind": "function", "doc": "

\n", "signature": "(source, predicate):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_projections": {"fullname": "sqlglot.optimizer.pushdown_projections", "modulename": "sqlglot.optimizer.pushdown_projections", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"fullname": "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION", "modulename": "sqlglot.optimizer.pushdown_projections", "qualname": "DEFAULT_SELECTION", "kind": "function", "doc": "

\n", "signature": "():", "funcdef": "def"}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"fullname": "sqlglot.optimizer.pushdown_projections.pushdown_projections", "modulename": "sqlglot.optimizer.pushdown_projections", "qualname": "pushdown_projections", "kind": "function", "doc": "

Rewrite sqlglot AST to remove unused column projections.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a, x.b AS b FROM x) AS y"\n>>> expression = sqlglot.parse_one(sql)\n>>> pushdown_projections(expression).sql()\n'SELECT y.a AS a FROM (SELECT x.a AS a FROM x) AS y'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
  • remove_unused_selections (bool): remove selects that are unused
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression, schema=None, remove_unused_selections=True):", "funcdef": "def"}, "sqlglot.optimizer.qualify": {"fullname": "sqlglot.optimizer.qualify", "modulename": "sqlglot.optimizer.qualify", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.qualify.qualify": {"fullname": "sqlglot.optimizer.qualify.qualify", "modulename": "sqlglot.optimizer.qualify", "qualname": "qualify", "kind": "function", "doc": "

Rewrite sqlglot AST to have normalized and qualified tables and columns.

\n\n

This step is necessary for all further SQLGlot optimizations.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> schema = {"tbl": {"col": "INT"}}\n>>> expression = sqlglot.parse_one("SELECT col FROM tbl")\n>>> qualify(expression, schema=schema).sql()\n'SELECT "tbl"."col" AS "col" FROM "tbl" AS "tbl"'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: Expression to qualify.
  • \n
  • db: Default database name for tables.
  • \n
  • catalog: Default catalog name for tables.
  • \n
  • schema: Schema to infer column names and types.
  • \n
  • expand_alias_refs: Whether or not to expand references to aliases.
  • \n
  • infer_schema: Whether or not to infer the schema if missing.
  • \n
  • isolate_tables: Whether or not to isolate table selects.
  • \n
  • qualify_columns: Whether or not to qualify columns.
  • \n
  • validate_qualify_columns: Whether or not to validate columns.
  • \n
  • quote_identifiers: Whether or not to run the quote_identifiers step.\nThis step is necessary to ensure correctness for case sensitive queries.\nBut this flag is provided in case this step is performed at a later time.
  • \n
  • identify: If True, quote all identifiers, else only necessary ones.
  • \n
\n\n
Returns:
\n\n
\n

The qualified expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tdb: Optional[str] = None,\tcatalog: Optional[str] = None,\tschema: Union[dict, sqlglot.schema.Schema, NoneType] = None,\texpand_alias_refs: bool = True,\tinfer_schema: Optional[bool] = None,\tisolate_tables: bool = False,\tqualify_columns: bool = True,\tvalidate_qualify_columns: bool = True,\tquote_identifiers: bool = True,\tidentify: bool = True) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns": {"fullname": "sqlglot.optimizer.qualify_columns", "modulename": "sqlglot.optimizer.qualify_columns", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"fullname": "sqlglot.optimizer.qualify_columns.qualify_columns", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "qualify_columns", "kind": "function", "doc": "

Rewrite sqlglot AST to have fully qualified columns.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> schema = {"tbl": {"col": "INT"}}\n>>> expression = sqlglot.parse_one("SELECT col FROM tbl")\n>>> qualify_columns(expression, schema).sql()\n'SELECT tbl.col AS col FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: expression to qualify
  • \n
  • schema: Database schema
  • \n
  • expand_alias_refs: whether or not to expand references to aliases
  • \n
  • infer_schema: whether or not to infer the schema if missing
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: qualified expression

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tschema: Union[Dict, sqlglot.schema.Schema],\texpand_alias_refs: bool = True,\tinfer_schema: Optional[bool] = None) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"fullname": "sqlglot.optimizer.qualify_columns.validate_qualify_columns", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "validate_qualify_columns", "kind": "function", "doc": "

Raise an OptimizeError if any columns aren't qualified

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"fullname": "sqlglot.optimizer.qualify_columns.quote_identifiers", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "quote_identifiers", "kind": "function", "doc": "

Makes sure all identifiers that need to be quoted are quoted.

\n", "signature": "(\texpression: ~E,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tidentify: bool = True) -> ~E:", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns.Resolver": {"fullname": "sqlglot.optimizer.qualify_columns.Resolver", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "Resolver", "kind": "class", "doc": "

Helper for resolving columns.

\n\n

This is a class so we can lazily load some things and easily share them across functions.

\n"}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"fullname": "sqlglot.optimizer.qualify_columns.Resolver.__init__", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "Resolver.__init__", "kind": "function", "doc": "

\n", "signature": "(scope, schema, infer_schema: bool = True)"}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"fullname": "sqlglot.optimizer.qualify_columns.Resolver.get_table", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "Resolver.get_table", "kind": "function", "doc": "

Get the table for a column name.

\n\n
Arguments:
\n\n
    \n
  • column_name: The column name to find the table for.
  • \n
\n\n
Returns:
\n\n
\n

The table name if it can be found/inferred.

\n
\n", "signature": "(self, column_name: str) -> Optional[sqlglot.expressions.Identifier]:", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"fullname": "sqlglot.optimizer.qualify_columns.Resolver.all_columns", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "Resolver.all_columns", "kind": "variable", "doc": "

All available columns of all sources in this scope

\n"}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"fullname": "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "Resolver.get_source_columns", "kind": "function", "doc": "

Resolve the source columns for a given source name

\n", "signature": "(self, name, only_visible=False):", "funcdef": "def"}, "sqlglot.optimizer.qualify_tables": {"fullname": "sqlglot.optimizer.qualify_tables", "modulename": "sqlglot.optimizer.qualify_tables", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"fullname": "sqlglot.optimizer.qualify_tables.qualify_tables", "modulename": "sqlglot.optimizer.qualify_tables", "qualname": "qualify_tables", "kind": "function", "doc": "

Rewrite sqlglot AST to have fully qualified tables. Additionally, this\nreplaces \"join constructs\" (*) by equivalent SELECT * subqueries.

\n\n
Examples:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT 1 FROM tbl")\n>>> qualify_tables(expression, db="db").sql()\n'SELECT 1 FROM db.tbl AS tbl'\n>>>\n>>> expression = sqlglot.parse_one("SELECT * FROM (tbl1 JOIN tbl2 ON id1 = id2)")\n>>> qualify_tables(expression).sql()\n'SELECT * FROM (SELECT * FROM tbl1 AS tbl1 JOIN tbl2 AS tbl2 ON id1 = id2) AS _q_0'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: Expression to qualify
  • \n
  • db: Database name
  • \n
  • catalog: Catalog name
  • \n
  • schema: A schema to populate
  • \n
\n\n
Returns:
\n\n
\n

The qualified expression.

\n
\n\n

(*) See section 7.2.1.2 in https://www.postgresql.org/docs/current/queries-table-expressions.html

\n", "signature": "(\texpression: ~E,\tdb: Optional[str] = None,\tcatalog: Optional[str] = None,\tschema: Optional[sqlglot.schema.Schema] = None) -> ~E:", "funcdef": "def"}, "sqlglot.optimizer.scope": {"fullname": "sqlglot.optimizer.scope", "modulename": "sqlglot.optimizer.scope", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.scope.ScopeType": {"fullname": "sqlglot.optimizer.scope.ScopeType", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType", "kind": "class", "doc": "

An enumeration.

\n", "bases": "enum.Enum"}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"fullname": "sqlglot.optimizer.scope.ScopeType.ROOT", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.ROOT", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.ROOT: 1>"}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"fullname": "sqlglot.optimizer.scope.ScopeType.SUBQUERY", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.SUBQUERY", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.SUBQUERY: 2>"}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"fullname": "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.DERIVED_TABLE", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.DERIVED_TABLE: 3>"}, "sqlglot.optimizer.scope.ScopeType.CTE": {"fullname": "sqlglot.optimizer.scope.ScopeType.CTE", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.CTE", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.CTE: 4>"}, "sqlglot.optimizer.scope.ScopeType.UNION": {"fullname": "sqlglot.optimizer.scope.ScopeType.UNION", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.UNION", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.UNION: 5>"}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"fullname": "sqlglot.optimizer.scope.ScopeType.UDTF", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.UDTF", "kind": "variable", "doc": "

\n", "default_value": "<ScopeType.UDTF: 6>"}, "sqlglot.optimizer.scope.Scope": {"fullname": "sqlglot.optimizer.scope.Scope", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope", "kind": "class", "doc": "

Selection scope.

\n\n
Attributes:
\n\n
    \n
  • expression (exp.Select|exp.Union): Root expression of this scope
  • \n
  • sources (dict[str, exp.Table|Scope]): Mapping of source name to either\na Table expression or another Scope instance. For example:\n SELECT * FROM x {\"x\": Table(this=\"x\")}\n SELECT * FROM x AS y {\"y\": Table(this=\"x\")}\n SELECT * FROM (SELECT ...) AS y {\"y\": Scope(...)}
  • \n
  • lateral_sources (dict[str, exp.Table|Scope]): Sources from laterals\nFor example:\n SELECT c FROM x LATERAL VIEW EXPLODE (a) AS c;\nThe LATERAL VIEW EXPLODE gets x as a source.
  • \n
  • outer_column_list (list[str]): If this is a derived table or CTE, and the outer query\ndefines a column list for its alias of this scope, this is that list of columns.\nFor example:\n SELECT * FROM (SELECT ...) AS y(col1, col2)\nThe inner query would have [\"col1\", \"col2\"] for its outer_column_list
  • \n
  • parent (Scope): Parent scope
  • \n
  • scope_type (ScopeType): Type of this scope, relative to its parent
  • \n
  • subquery_scopes (list[Scope]): List of all child scopes for subqueries
  • \n
  • cte_scopes (list[Scope]): List of all child scopes for CTEs
  • \n
  • derived_table_scopes (list[Scope]): List of all child scopes for derived_tables
  • \n
  • udtf_scopes (list[Scope]): List of all child scopes for user defined tabular functions
  • \n
  • table_scopes (list[Scope]): derived_table_scopes + udtf_scopes, in the order that they're defined
  • \n
  • union_scopes (list[Scope, Scope]): If this Scope is for a Union expression, this will be\na list of the left and right child scopes.
  • \n
\n"}, "sqlglot.optimizer.scope.Scope.__init__": {"fullname": "sqlglot.optimizer.scope.Scope.__init__", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.__init__", "kind": "function", "doc": "

\n", "signature": "(\texpression,\tsources=None,\touter_column_list=None,\tparent=None,\tscope_type=<ScopeType.ROOT: 1>,\tlateral_sources=None)"}, "sqlglot.optimizer.scope.Scope.clear_cache": {"fullname": "sqlglot.optimizer.scope.Scope.clear_cache", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.clear_cache", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.branch": {"fullname": "sqlglot.optimizer.scope.Scope.branch", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.branch", "kind": "function", "doc": "

Branch from the current scope to a new, inner scope

\n", "signature": "(self, expression, scope_type, chain_sources=None, **kwargs):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.walk": {"fullname": "sqlglot.optimizer.scope.Scope.walk", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.walk", "kind": "function", "doc": "

\n", "signature": "(self, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.find": {"fullname": "sqlglot.optimizer.scope.Scope.find", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.find", "kind": "function", "doc": "

Returns the first node in this scope which matches at least one of the specified types.

\n\n

This does NOT traverse into subscopes.

\n\n
Arguments:
\n\n
    \n
  • expression_types (type): the expression type(s) to match.
  • \n
  • bfs (bool): True to use breadth-first search, False to use depth-first.
  • \n
\n\n
Returns:
\n\n
\n

exp.Expression: the node which matches the criteria or None if no node matching\n the criteria was found.

\n
\n", "signature": "(self, *expression_types, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.find_all": {"fullname": "sqlglot.optimizer.scope.Scope.find_all", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.find_all", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this scope and only yields those that\nmatch at least one of the specified expression types.

\n\n

This does NOT traverse into subscopes.

\n\n
Arguments:
\n\n
    \n
  • expression_types (type): the expression type(s) to match.
  • \n
  • bfs (bool): True to use breadth-first search, False to use depth-first.
  • \n
\n\n
Yields:
\n\n
\n

exp.Expression: nodes

\n
\n", "signature": "(self, *expression_types, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.replace": {"fullname": "sqlglot.optimizer.scope.Scope.replace", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.replace", "kind": "function", "doc": "

Replace old with new.

\n\n

This can be used instead of exp.Expression.replace to ensure the Scope is kept up-to-date.

\n\n
Arguments:
\n\n
    \n
  • old (exp.Expression): old node
  • \n
  • new (exp.Expression): new node
  • \n
\n", "signature": "(self, old, new):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.tables": {"fullname": "sqlglot.optimizer.scope.Scope.tables", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.tables", "kind": "variable", "doc": "

List of tables in this scope.

\n\n
Returns:
\n\n
\n

list[exp.Table]: tables

\n
\n"}, "sqlglot.optimizer.scope.Scope.ctes": {"fullname": "sqlglot.optimizer.scope.Scope.ctes", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.ctes", "kind": "variable", "doc": "

List of CTEs in this scope.

\n\n
Returns:
\n\n
\n

list[exp.CTE]: ctes

\n
\n"}, "sqlglot.optimizer.scope.Scope.derived_tables": {"fullname": "sqlglot.optimizer.scope.Scope.derived_tables", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.derived_tables", "kind": "variable", "doc": "

List of derived tables in this scope.

\n\n
For example:
\n\n
\n

SELECT * FROM (SELECT ...) <- that's a derived table

\n
\n\n
Returns:
\n\n
\n

list[exp.Subquery]: derived tables

\n
\n"}, "sqlglot.optimizer.scope.Scope.udtfs": {"fullname": "sqlglot.optimizer.scope.Scope.udtfs", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.udtfs", "kind": "variable", "doc": "

List of \"User Defined Tabular Functions\" in this scope.

\n\n
Returns:
\n\n
\n

list[exp.UDTF]: UDTFs

\n
\n"}, "sqlglot.optimizer.scope.Scope.subqueries": {"fullname": "sqlglot.optimizer.scope.Scope.subqueries", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.subqueries", "kind": "variable", "doc": "

List of subqueries in this scope.

\n\n
For example:
\n\n
\n

SELECT * FROM x WHERE a IN (SELECT ...) <- that's a subquery

\n
\n\n
Returns:
\n\n
\n

list[exp.Subqueryable]: subqueries

\n
\n"}, "sqlglot.optimizer.scope.Scope.columns": {"fullname": "sqlglot.optimizer.scope.Scope.columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.columns", "kind": "variable", "doc": "

List of columns in this scope.

\n\n
Returns:
\n\n
\n

list[exp.Column]: Column instances in this scope, plus any\n Columns that reference this scope from correlated subqueries.

\n
\n"}, "sqlglot.optimizer.scope.Scope.selected_sources": {"fullname": "sqlglot.optimizer.scope.Scope.selected_sources", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.selected_sources", "kind": "variable", "doc": "

Mapping of nodes and sources that are actually selected from in this scope.

\n\n

That is, all tables in a schema are selectable at any point. But a\ntable only becomes a selected source if it's included in a FROM or JOIN clause.

\n\n
Returns:
\n\n
\n

dict[str, (exp.Table|exp.Select, exp.Table|Scope)]: selected sources and nodes

\n
\n"}, "sqlglot.optimizer.scope.Scope.cte_sources": {"fullname": "sqlglot.optimizer.scope.Scope.cte_sources", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.cte_sources", "kind": "variable", "doc": "

Sources that are CTEs.

\n\n
Returns:
\n\n
\n

dict[str, Scope]: Mapping of source alias to Scope

\n
\n"}, "sqlglot.optimizer.scope.Scope.selects": {"fullname": "sqlglot.optimizer.scope.Scope.selects", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.selects", "kind": "variable", "doc": "

Select expressions of this scope.

\n\n

For example, for the following expression:\n SELECT 1 as a, 2 as b FROM x

\n\n

The outputs are the \"1 as a\" and \"2 as b\" expressions.

\n\n
Returns:
\n\n
\n

list[exp.Expression]: expressions

\n
\n"}, "sqlglot.optimizer.scope.Scope.external_columns": {"fullname": "sqlglot.optimizer.scope.Scope.external_columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.external_columns", "kind": "variable", "doc": "

Columns that appear to reference sources in outer scopes.

\n\n
Returns:
\n\n
\n

list[exp.Column]: Column instances that don't reference\n sources in the current scope.

\n
\n"}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"fullname": "sqlglot.optimizer.scope.Scope.unqualified_columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.unqualified_columns", "kind": "variable", "doc": "

Unqualified columns in the current scope.

\n\n
Returns:
\n\n
\n

list[exp.Column]: Unqualified columns

\n
\n"}, "sqlglot.optimizer.scope.Scope.join_hints": {"fullname": "sqlglot.optimizer.scope.Scope.join_hints", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.join_hints", "kind": "variable", "doc": "

Hints that exist in the scope that reference tables

\n\n
Returns:
\n\n
\n

list[exp.JoinHint]: Join hints that are referenced within the scope

\n
\n"}, "sqlglot.optimizer.scope.Scope.source_columns": {"fullname": "sqlglot.optimizer.scope.Scope.source_columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.source_columns", "kind": "function", "doc": "

Get all columns in the current scope for a particular source.

\n\n
Arguments:
\n\n
    \n
  • source_name (str): Name of the source
  • \n
\n\n
Returns:
\n\n
\n

list[exp.Column]: Column instances that reference source_name

\n
\n", "signature": "(self, source_name):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.is_subquery": {"fullname": "sqlglot.optimizer.scope.Scope.is_subquery", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_subquery", "kind": "variable", "doc": "

Determine if this scope is a subquery

\n"}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"fullname": "sqlglot.optimizer.scope.Scope.is_derived_table", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_derived_table", "kind": "variable", "doc": "

Determine if this scope is a derived table

\n"}, "sqlglot.optimizer.scope.Scope.is_union": {"fullname": "sqlglot.optimizer.scope.Scope.is_union", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_union", "kind": "variable", "doc": "

Determine if this scope is a union

\n"}, "sqlglot.optimizer.scope.Scope.is_cte": {"fullname": "sqlglot.optimizer.scope.Scope.is_cte", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_cte", "kind": "variable", "doc": "

Determine if this scope is a common table expression

\n"}, "sqlglot.optimizer.scope.Scope.is_root": {"fullname": "sqlglot.optimizer.scope.Scope.is_root", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_root", "kind": "variable", "doc": "

Determine if this is the root scope

\n"}, "sqlglot.optimizer.scope.Scope.is_udtf": {"fullname": "sqlglot.optimizer.scope.Scope.is_udtf", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_udtf", "kind": "variable", "doc": "

Determine if this scope is a UDTF (User Defined Table Function)

\n"}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"fullname": "sqlglot.optimizer.scope.Scope.is_correlated_subquery", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_correlated_subquery", "kind": "variable", "doc": "

Determine if this scope is a correlated subquery

\n"}, "sqlglot.optimizer.scope.Scope.rename_source": {"fullname": "sqlglot.optimizer.scope.Scope.rename_source", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.rename_source", "kind": "function", "doc": "

Rename a source in this scope

\n", "signature": "(self, old_name, new_name):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.add_source": {"fullname": "sqlglot.optimizer.scope.Scope.add_source", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.add_source", "kind": "function", "doc": "

Add a source to this scope

\n", "signature": "(self, name, source):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.remove_source": {"fullname": "sqlglot.optimizer.scope.Scope.remove_source", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.remove_source", "kind": "function", "doc": "

Remove a source from this scope

\n", "signature": "(self, name):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.traverse": {"fullname": "sqlglot.optimizer.scope.Scope.traverse", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.traverse", "kind": "function", "doc": "

Traverse the scope tree from this node.

\n\n
Yields:
\n\n
\n

Scope: scope instances in depth-first-search post-order

\n
\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.ref_count": {"fullname": "sqlglot.optimizer.scope.Scope.ref_count", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.ref_count", "kind": "function", "doc": "

Count the number of times each scope in this tree is referenced.

\n\n
Returns:
\n\n
\n

dict[int, int]: Mapping of Scope instance ID to reference count

\n
\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.optimizer.scope.traverse_scope": {"fullname": "sqlglot.optimizer.scope.traverse_scope", "modulename": "sqlglot.optimizer.scope", "qualname": "traverse_scope", "kind": "function", "doc": "

Traverse an expression by its \"scopes\".

\n\n

\"Scope\" represents the current context of a Select statement.

\n\n

This is helpful for optimizing queries, where we need more information than\nthe expression tree itself. For example, we might care about the source\nnames within a subquery. Returns a list because a generator could result in\nincomplete properties which is confusing.

\n\n
Examples:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y")\n>>> scopes = traverse_scope(expression)\n>>> scopes[0].expression.sql(), list(scopes[0].sources)\n('SELECT a FROM x', ['x'])\n>>> scopes[1].expression.sql(), list(scopes[1].sources)\n('SELECT a FROM (SELECT a FROM x) AS y', ['y'])\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (exp.Expression): expression to traverse
  • \n
\n\n
Returns:
\n\n
\n

list[Scope]: scope instances

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> List[sqlglot.optimizer.scope.Scope]:", "funcdef": "def"}, "sqlglot.optimizer.scope.build_scope": {"fullname": "sqlglot.optimizer.scope.build_scope", "modulename": "sqlglot.optimizer.scope", "qualname": "build_scope", "kind": "function", "doc": "

Build a scope tree.

\n\n
Arguments:
\n\n
    \n
  • expression (exp.Expression): expression to build the scope tree for
  • \n
\n\n
Returns:
\n\n
\n

Scope: root scope

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> Optional[sqlglot.optimizer.scope.Scope]:", "funcdef": "def"}, "sqlglot.optimizer.scope.walk_in_scope": {"fullname": "sqlglot.optimizer.scope.walk_in_scope", "modulename": "sqlglot.optimizer.scope", "qualname": "walk_in_scope", "kind": "function", "doc": "

Returns a generator object which visits all nodes in the syntax tree, stopping at\nnodes that start child scopes.

\n\n
Arguments:
\n\n
    \n
  • expression (exp.Expression): the expression to walk.
  • \n
  • bfs (bool): if set to True, the BFS traversal order is applied;\notherwise, the DFS traversal order is used.
  • \n
\n\n
Yields:
\n\n
\n

tuple[exp.Expression, Optional[exp.Expression], str]: node, parent, arg key

\n
\n", "signature": "(expression, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify": {"fullname": "sqlglot.optimizer.simplify", "modulename": "sqlglot.optimizer.simplify", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.simplify.simplify": {"fullname": "sqlglot.optimizer.simplify.simplify", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify", "kind": "function", "doc": "

Rewrite sqlglot AST to simplify expressions.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("TRUE AND TRUE")\n>>> simplify(expression).sql()\n'TRUE'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to simplify
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: simplified expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.rewrite_between": {"fullname": "sqlglot.optimizer.simplify.rewrite_between", "modulename": "sqlglot.optimizer.simplify", "qualname": "rewrite_between", "kind": "function", "doc": "

Rewrite x between y and z to x >= y AND x <= z.

\n\n

This is done because comparison simplification is only done on lt/lte/gt/gte.

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_not": {"fullname": "sqlglot.optimizer.simplify.simplify_not", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_not", "kind": "function", "doc": "

De Morgan's Law\nNOT (x OR y) -> NOT x AND NOT y\nNOT (x AND y) -> NOT x OR NOT y

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.flatten": {"fullname": "sqlglot.optimizer.simplify.flatten", "modulename": "sqlglot.optimizer.simplify", "qualname": "flatten", "kind": "function", "doc": "

A AND (B AND C) -> A AND B AND C\nA OR (B OR C) -> A OR B OR C

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_connectors": {"fullname": "sqlglot.optimizer.simplify.simplify_connectors", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_connectors", "kind": "function", "doc": "

\n", "signature": "(expression, root=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify.remove_compliments": {"fullname": "sqlglot.optimizer.simplify.remove_compliments", "modulename": "sqlglot.optimizer.simplify", "qualname": "remove_compliments", "kind": "function", "doc": "

Removing complements.

\n\n

A AND NOT A -> FALSE\nA OR NOT A -> TRUE

\n", "signature": "(expression, root=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify.uniq_sort": {"fullname": "sqlglot.optimizer.simplify.uniq_sort", "modulename": "sqlglot.optimizer.simplify", "qualname": "uniq_sort", "kind": "function", "doc": "

Uniq and sort a connector.

\n\n

C AND A AND B AND B -> A AND B AND C

\n", "signature": "(expression, generate, root=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"fullname": "sqlglot.optimizer.simplify.absorb_and_eliminate", "modulename": "sqlglot.optimizer.simplify", "qualname": "absorb_and_eliminate", "kind": "function", "doc": "

absorption:\n A AND (A OR B) -> A\n A OR (A AND B) -> A\n A AND (NOT A OR B) -> A AND B\n A OR (NOT A AND B) -> A OR B\nelimination:\n (A AND B) OR (A AND NOT B) -> A\n (A OR B) AND (A OR NOT B) -> A

\n", "signature": "(expression, root=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_literals": {"fullname": "sqlglot.optimizer.simplify.simplify_literals", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_literals", "kind": "function", "doc": "

\n", "signature": "(expression, root=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_parens": {"fullname": "sqlglot.optimizer.simplify.simplify_parens", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_parens", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.remove_where_true": {"fullname": "sqlglot.optimizer.simplify.remove_where_true", "modulename": "sqlglot.optimizer.simplify", "qualname": "remove_where_true", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.always_true": {"fullname": "sqlglot.optimizer.simplify.always_true", "modulename": "sqlglot.optimizer.simplify", "qualname": "always_true", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.is_complement": {"fullname": "sqlglot.optimizer.simplify.is_complement", "modulename": "sqlglot.optimizer.simplify", "qualname": "is_complement", "kind": "function", "doc": "

\n", "signature": "(a, b):", "funcdef": "def"}, "sqlglot.optimizer.simplify.is_false": {"fullname": "sqlglot.optimizer.simplify.is_false", "modulename": "sqlglot.optimizer.simplify", "qualname": "is_false", "kind": "function", "doc": "

\n", "signature": "(a: sqlglot.expressions.Expression) -> bool:", "funcdef": "def"}, "sqlglot.optimizer.simplify.is_null": {"fullname": "sqlglot.optimizer.simplify.is_null", "modulename": "sqlglot.optimizer.simplify", "qualname": "is_null", "kind": "function", "doc": "

\n", "signature": "(a: sqlglot.expressions.Expression) -> bool:", "funcdef": "def"}, "sqlglot.optimizer.simplify.eval_boolean": {"fullname": "sqlglot.optimizer.simplify.eval_boolean", "modulename": "sqlglot.optimizer.simplify", "qualname": "eval_boolean", "kind": "function", "doc": "

\n", "signature": "(expression, a, b):", "funcdef": "def"}, "sqlglot.optimizer.simplify.extract_date": {"fullname": "sqlglot.optimizer.simplify.extract_date", "modulename": "sqlglot.optimizer.simplify", "qualname": "extract_date", "kind": "function", "doc": "

\n", "signature": "(cast):", "funcdef": "def"}, "sqlglot.optimizer.simplify.extract_interval": {"fullname": "sqlglot.optimizer.simplify.extract_interval", "modulename": "sqlglot.optimizer.simplify", "qualname": "extract_interval", "kind": "function", "doc": "

\n", "signature": "(interval):", "funcdef": "def"}, "sqlglot.optimizer.simplify.date_literal": {"fullname": "sqlglot.optimizer.simplify.date_literal", "modulename": "sqlglot.optimizer.simplify", "qualname": "date_literal", "kind": "function", "doc": "

\n", "signature": "(date):", "funcdef": "def"}, "sqlglot.optimizer.simplify.boolean_literal": {"fullname": "sqlglot.optimizer.simplify.boolean_literal", "modulename": "sqlglot.optimizer.simplify", "qualname": "boolean_literal", "kind": "function", "doc": "

\n", "signature": "(condition):", "funcdef": "def"}, "sqlglot.optimizer.unnest_subqueries": {"fullname": "sqlglot.optimizer.unnest_subqueries", "modulename": "sqlglot.optimizer.unnest_subqueries", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"fullname": "sqlglot.optimizer.unnest_subqueries.unnest_subqueries", "modulename": "sqlglot.optimizer.unnest_subqueries", "qualname": "unnest_subqueries", "kind": "function", "doc": "

Rewrite sqlglot AST to convert some predicates with subqueries into joins.

\n\n

Convert scalar subqueries into cross joins.\nConvert correlated or vectorized subqueries into a GROUP BY so the resulting left join is not many-to-many.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT * FROM x AS x WHERE (SELECT y.a AS a FROM y AS y WHERE x.a = y.a) = 1 ")\n>>> unnest_subqueries(expression).sql()\n'SELECT * FROM x AS x LEFT JOIN (SELECT y.a AS a FROM y AS y WHERE TRUE GROUP BY y.a) AS _u_0 ON x.a = _u_0.a WHERE _u_0.a = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to unnest
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: unnested expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.unnest_subqueries.unnest": {"fullname": "sqlglot.optimizer.unnest_subqueries.unnest", "modulename": "sqlglot.optimizer.unnest_subqueries", "qualname": "unnest", "kind": "function", "doc": "

\n", "signature": "(select, parent_select, next_alias_name):", "funcdef": "def"}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"fullname": "sqlglot.optimizer.unnest_subqueries.decorrelate", "modulename": "sqlglot.optimizer.unnest_subqueries", "qualname": "decorrelate", "kind": "function", "doc": "

\n", "signature": "(select, parent_select, external_columns, next_alias_name):", "funcdef": "def"}, "sqlglot.parser": {"fullname": "sqlglot.parser", "modulename": "sqlglot.parser", "kind": "module", "doc": "

\n"}, "sqlglot.parser.parse_var_map": {"fullname": "sqlglot.parser.parse_var_map", "modulename": "sqlglot.parser", "qualname": "parse_var_map", "kind": "function", "doc": "

\n", "signature": "(args: List) -> sqlglot.expressions.StarMap | sqlglot.expressions.VarMap:", "funcdef": "def"}, "sqlglot.parser.parse_like": {"fullname": "sqlglot.parser.parse_like", "modulename": "sqlglot.parser", "qualname": "parse_like", "kind": "function", "doc": "

\n", "signature": "(args: List) -> sqlglot.expressions.Escape | sqlglot.expressions.Like:", "funcdef": "def"}, "sqlglot.parser.binary_range_parser": {"fullname": "sqlglot.parser.binary_range_parser", "modulename": "sqlglot.parser", "qualname": "binary_range_parser", "kind": "function", "doc": "

\n", "signature": "(\texpr_type: Type[sqlglot.expressions.Expression]) -> Callable[[sqlglot.parser.Parser, Optional[sqlglot.expressions.Expression]], Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parser.Parser": {"fullname": "sqlglot.parser.Parser", "modulename": "sqlglot.parser", "qualname": "Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: The desired error level.\nDefault: ErrorLevel.IMMEDIATE
  • \n
  • error_message_context: Determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 100
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
\n"}, "sqlglot.parser.Parser.__init__": {"fullname": "sqlglot.parser.Parser.__init__", "modulename": "sqlglot.parser", "qualname": "Parser.__init__", "kind": "function", "doc": "

\n", "signature": "(\terror_level: Optional[sqlglot.errors.ErrorLevel] = None,\terror_message_context: int = 100,\tmax_errors: int = 3)"}, "sqlglot.parser.Parser.reset": {"fullname": "sqlglot.parser.Parser.reset", "modulename": "sqlglot.parser", "qualname": "Parser.reset", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.parser.Parser.parse": {"fullname": "sqlglot.parser.Parser.parse", "modulename": "sqlglot.parser", "qualname": "Parser.parse", "kind": "function", "doc": "

Parses a list of tokens and returns a list of syntax trees, one tree\nper parsed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • raw_tokens: The list of tokens.
  • \n
  • sql: The original SQL string, used to produce helpful debug messages.
  • \n
\n\n
Returns:
\n\n
\n

The list of the produced syntax trees.

\n
\n", "signature": "(\tself,\traw_tokens: List[sqlglot.tokens.Token],\tsql: Optional[str] = None) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parser.Parser.parse_into": {"fullname": "sqlglot.parser.Parser.parse_into", "modulename": "sqlglot.parser", "qualname": "Parser.parse_into", "kind": "function", "doc": "

Parses a list of tokens into a given Expression type. If a collection of Expression\ntypes is given instead, this method will try to parse the token list into each one\nof them, stopping at the first for which the parsing succeeds.

\n\n
Arguments:
\n\n
    \n
  • expression_types: The expression type(s) to try and parse the token list into.
  • \n
  • raw_tokens: The list of tokens.
  • \n
  • sql: The original SQL string, used to produce helpful debug messages.
  • \n
\n\n
Returns:
\n\n
\n

The target Expression.

\n
\n", "signature": "(\tself,\texpression_types: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]]],\traw_tokens: List[sqlglot.tokens.Token],\tsql: Optional[str] = None) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parser.Parser.check_errors": {"fullname": "sqlglot.parser.Parser.check_errors", "modulename": "sqlglot.parser", "qualname": "Parser.check_errors", "kind": "function", "doc": "

Logs or raises any found errors, depending on the chosen error level setting.

\n", "signature": "(self) -> None:", "funcdef": "def"}, "sqlglot.parser.Parser.raise_error": {"fullname": "sqlglot.parser.Parser.raise_error", "modulename": "sqlglot.parser", "qualname": "Parser.raise_error", "kind": "function", "doc": "

Appends an error to the list of recorded errors or raises it, depending on the chosen\nerror level setting.

\n", "signature": "(self, message: str, token: Optional[sqlglot.tokens.Token] = None) -> None:", "funcdef": "def"}, "sqlglot.parser.Parser.expression": {"fullname": "sqlglot.parser.Parser.expression", "modulename": "sqlglot.parser", "qualname": "Parser.expression", "kind": "function", "doc": "

Creates a new, validated Expression.

\n\n
Arguments:
\n\n
    \n
  • exp_class: The expression class to instantiate.
  • \n
  • comments: An optional list of comments to attach to the expression.
  • \n
  • kwargs: The arguments to set for the expression along with their respective values.
  • \n
\n\n
Returns:
\n\n
\n

The target expression.

\n
\n", "signature": "(\tself,\texp_class: Type[~E],\tcomments: Optional[List[str]] = None,\t**kwargs) -> ~E:", "funcdef": "def"}, "sqlglot.parser.Parser.validate_expression": {"fullname": "sqlglot.parser.Parser.validate_expression", "modulename": "sqlglot.parser", "qualname": "Parser.validate_expression", "kind": "function", "doc": "

Validates an Expression, making sure that all its mandatory arguments are set.

\n\n
Arguments:
\n\n
    \n
  • expression: The expression to validate.
  • \n
  • args: An optional list of items that was used to instantiate the expression, if it's a Func.
  • \n
\n\n
Returns:
\n\n
\n

The validated expression.

\n
\n", "signature": "(self, expression: ~E, args: Optional[List] = None) -> ~E:", "funcdef": "def"}, "sqlglot.planner": {"fullname": "sqlglot.planner", "modulename": "sqlglot.planner", "kind": "module", "doc": "

\n"}, "sqlglot.planner.Plan": {"fullname": "sqlglot.planner.Plan", "modulename": "sqlglot.planner", "qualname": "Plan", "kind": "class", "doc": "

\n"}, "sqlglot.planner.Plan.__init__": {"fullname": "sqlglot.planner.Plan.__init__", "modulename": "sqlglot.planner", "qualname": "Plan.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.planner.Step": {"fullname": "sqlglot.planner.Step", "modulename": "sqlglot.planner", "qualname": "Step", "kind": "class", "doc": "

\n"}, "sqlglot.planner.Step.from_expression": {"fullname": "sqlglot.planner.Step.from_expression", "modulename": "sqlglot.planner", "qualname": "Step.from_expression", "kind": "function", "doc": "

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.\nNote: the expression's tables and subqueries must be aliased for this method to work. For\nexample, given the following expression:

\n\n

SELECT\n x.a,\n SUM(x.b)\nFROM x AS x\nJOIN y AS y\n ON x.a = y.a\nGROUP BY x.a

\n\n

the following DAG is produced (the expression IDs might differ per execution):

\n\n
    \n
  • Aggregate: x (4347984624)\n  Context:\n    Aggregations:\n      - SUM(x.b)\n    Group:\n      - x.a\n  Projections:\n    - x.a\n    - \"x\".\"\"\n  Dependencies:\n  - Join: x (4347985296)\n    Context:\n      y:\n      On: x.a = y.a\n    Projections:\n    Dependencies:\n    - Scan: x (4347983136)\n      Context:\n        Source: x AS x\n      Projections:\n    - Scan: y (4343416624)\n      Context:\n        Source: y AS y\n      Projections:
  • \n
\n\n
Arguments:
\n\n
    \n
  • expression: the expression to build the DAG from.
  • \n
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • \n
\n\n
Returns:
\n\n
\n

A Step DAG corresponding to expression.

\n
\n", "signature": "(\tcls,\texpression: sqlglot.expressions.Expression,\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.planner.Step.add_dependency": {"fullname": "sqlglot.planner.Step.add_dependency", "modulename": "sqlglot.planner", "qualname": "Step.add_dependency", "kind": "function", "doc": "

\n", "signature": "(self, dependency: sqlglot.planner.Step) -> None:", "funcdef": "def"}, "sqlglot.planner.Step.to_s": {"fullname": "sqlglot.planner.Step.to_s", "modulename": "sqlglot.planner", "qualname": "Step.to_s", "kind": "function", "doc": "

\n", "signature": "(self, level: int = 0) -> str:", "funcdef": "def"}, "sqlglot.planner.Scan": {"fullname": "sqlglot.planner.Scan", "modulename": "sqlglot.planner", "qualname": "Scan", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.Scan.from_expression": {"fullname": "sqlglot.planner.Scan.from_expression", "modulename": "sqlglot.planner", "qualname": "Scan.from_expression", "kind": "function", "doc": "

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.\nNote: the expression's tables and subqueries must be aliased for this method to work. For\nexample, given the following expression:

\n\n

SELECT\n x.a,\n SUM(x.b)\nFROM x AS x\nJOIN y AS y\n ON x.a = y.a\nGROUP BY x.a

\n\n

the following DAG is produced (the expression IDs might differ per execution):

\n\n
    \n
  • Aggregate: x (4347984624)\n  Context:\n    Aggregations:\n      - SUM(x.b)\n    Group:\n      - x.a\n  Projections:\n    - x.a\n    - \"x\".\"\"\n  Dependencies:\n  - Join: x (4347985296)\n    Context:\n      y:\n      On: x.a = y.a\n    Projections:\n    Dependencies:\n    - Scan: x (4347983136)\n      Context:\n        Source: x AS x\n      Projections:\n    - Scan: y (4343416624)\n      Context:\n        Source: y AS y\n      Projections:
  • \n
\n\n
Arguments:
\n\n
    \n
  • expression: the expression to build the DAG from.
  • \n
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • \n
\n\n
Returns:
\n\n
\n

A Step DAG corresponding to expression.

\n
\n", "signature": "(\tcls,\texpression: sqlglot.expressions.Expression,\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.planner.Join": {"fullname": "sqlglot.planner.Join", "modulename": "sqlglot.planner", "qualname": "Join", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.Join.from_joins": {"fullname": "sqlglot.planner.Join.from_joins", "modulename": "sqlglot.planner", "qualname": "Join.from_joins", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tjoins: Iterable[sqlglot.expressions.Join],\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.planner.Aggregate": {"fullname": "sqlglot.planner.Aggregate", "modulename": "sqlglot.planner", "qualname": "Aggregate", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.Sort": {"fullname": "sqlglot.planner.Sort", "modulename": "sqlglot.planner", "qualname": "Sort", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.SetOperation": {"fullname": "sqlglot.planner.SetOperation", "modulename": "sqlglot.planner", "qualname": "SetOperation", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.SetOperation.__init__": {"fullname": "sqlglot.planner.SetOperation.__init__", "modulename": "sqlglot.planner", "qualname": "SetOperation.__init__", "kind": "function", "doc": "

\n", "signature": "(\top: Type[sqlglot.expressions.Expression],\tleft: str | None,\tright: str | None,\tdistinct: bool = False)"}, "sqlglot.planner.SetOperation.from_expression": {"fullname": "sqlglot.planner.SetOperation.from_expression", "modulename": "sqlglot.planner", "qualname": "SetOperation.from_expression", "kind": "function", "doc": "

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.\nNote: the expression's tables and subqueries must be aliased for this method to work. For\nexample, given the following expression:

\n\n

SELECT\n x.a,\n SUM(x.b)\nFROM x AS x\nJOIN y AS y\n ON x.a = y.a\nGROUP BY x.a

\n\n

the following DAG is produced (the expression IDs might differ per execution):

\n\n
    \n
  • Aggregate: x (4347984624)\n  Context:\n    Aggregations:\n      - SUM(x.b)\n    Group:\n      - x.a\n  Projections:\n    - x.a\n    - \"x\".\"\"\n  Dependencies:\n  - Join: x (4347985296)\n    Context:\n      y:\n      On: x.a = y.a\n    Projections:\n    Dependencies:\n    - Scan: x (4347983136)\n      Context:\n        Source: x AS x\n      Projections:\n    - Scan: y (4343416624)\n      Context:\n        Source: y AS y\n      Projections:
  • \n
\n\n
Arguments:
\n\n
    \n
  • expression: the expression to build the DAG from.
  • \n
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • \n
\n\n
Returns:
\n\n
\n

A Step DAG corresponding to expression.

\n
\n", "signature": "(\tcls,\texpression: sqlglot.expressions.Expression,\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.schema.Schema": {"fullname": "sqlglot.schema.Schema", "modulename": "sqlglot.schema", "qualname": "Schema", "kind": "class", "doc": "

Abstract base class for database schemas

\n", "bases": "abc.ABC"}, "sqlglot.schema.Schema.add_table": {"fullname": "sqlglot.schema.Schema.add_table", "modulename": "sqlglot.schema", "qualname": "Schema.add_table", "kind": "function", "doc": "

Register or update a table. Some implementing classes may require column information to also be provided.

\n\n
Arguments:
\n\n
    \n
  • table: the Table expression instance or string representing the table.
  • \n
  • column_mapping: a column mapping that describes the structure of the table.
  • \n
  • dialect: the SQL dialect that will be used to parse table if it's a string.
  • \n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn_mapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> None:", "funcdef": "def"}, "sqlglot.schema.Schema.column_names": {"fullname": "sqlglot.schema.Schema.column_names", "modulename": "sqlglot.schema", "qualname": "Schema.column_names", "kind": "function", "doc": "

Get the column names for a table.

\n\n
Arguments:
\n\n
    \n
  • table: the Table expression instance.
  • \n
  • only_visible: whether to include invisible columns.
  • \n
  • dialect: the SQL dialect that will be used to parse table if it's a string.
  • \n
\n\n
Returns:
\n\n
\n

The list of column names.

\n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tonly_visible: bool = False,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> List[str]:", "funcdef": "def"}, "sqlglot.schema.Schema.get_column_type": {"fullname": "sqlglot.schema.Schema.get_column_type", "modulename": "sqlglot.schema", "qualname": "Schema.get_column_type", "kind": "function", "doc": "

Get the sqlglot.exp.DataType type of a column in the schema.

\n\n
Arguments:
\n\n
    \n
  • table: the source table.
  • \n
  • column: the target column.
  • \n
  • dialect: the SQL dialect that will be used to parse table if it's a string.
  • \n
\n\n
Returns:
\n\n
\n

The resulting column type.

\n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn: sqlglot.expressions.Column,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> sqlglot.expressions.DataType:", "funcdef": "def"}, "sqlglot.schema.Schema.supported_table_args": {"fullname": "sqlglot.schema.Schema.supported_table_args", "modulename": "sqlglot.schema", "qualname": "Schema.supported_table_args", "kind": "variable", "doc": "

Table arguments this schema supports, e.g. (\"this\", \"db\", \"catalog\")

\n", "annotation": ": Tuple[str, ...]"}, "sqlglot.schema.Schema.empty": {"fullname": "sqlglot.schema.Schema.empty", "modulename": "sqlglot.schema", "qualname": "Schema.empty", "kind": "variable", "doc": "

Returns whether or not the schema is empty.

\n", "annotation": ": bool"}, "sqlglot.schema.AbstractMappingSchema": {"fullname": "sqlglot.schema.AbstractMappingSchema", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema", "kind": "class", "doc": "

Abstract base class for generic types.

\n\n

A generic type is typically declared by inheriting from\nthis class parameterized with one or more type variables.\nFor example, a generic mapping type might be defined as::

\n\n

class Mapping(Generic[KT, VT]):\n def __getitem__(self, key: KT) -> VT:\n ...\n # Etc.

\n\n

This class can then be used as follows::

\n\n

def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:\n try:\n return mapping[key]\n except KeyError:\n return default

\n", "bases": "typing.Generic[~T]"}, "sqlglot.schema.AbstractMappingSchema.__init__": {"fullname": "sqlglot.schema.AbstractMappingSchema.__init__", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.__init__", "kind": "function", "doc": "

\n", "signature": "(mapping: Optional[Dict] = None)"}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"fullname": "sqlglot.schema.AbstractMappingSchema.table_parts", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.table_parts", "kind": "function", "doc": "

\n", "signature": "(self, table: sqlglot.expressions.Table) -> List[str]:", "funcdef": "def"}, "sqlglot.schema.AbstractMappingSchema.find": {"fullname": "sqlglot.schema.AbstractMappingSchema.find", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.find", "kind": "function", "doc": "

\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table,\ttrie: Optional[Dict] = None,\traise_on_missing: bool = True) -> Optional[~T]:", "funcdef": "def"}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"fullname": "sqlglot.schema.AbstractMappingSchema.nested_get", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.nested_get", "kind": "function", "doc": "

\n", "signature": "(\tself,\tparts: Sequence[str],\td: Optional[Dict] = None,\traise_on_missing=True) -> Optional[Any]:", "funcdef": "def"}, "sqlglot.schema.MappingSchema": {"fullname": "sqlglot.schema.MappingSchema", "modulename": "sqlglot.schema", "qualname": "MappingSchema", "kind": "class", "doc": "

Schema based on a nested mapping.

\n\n
Arguments:
\n\n
    \n
  • schema: Mapping in one of the following forms:\n
      \n
    1. {table: {col: type}}
    2. \n
    3. {db: {table: {col: type}}}
    4. \n
    5. {catalog: {db: {table: {col: type}}}}
    6. \n
    7. None - Tables will be added later
    8. \n
  • \n
  • visible: Optional mapping of which columns in the schema are visible. If not provided, all columns\nare assumed to be visible. The nesting should mirror that of the schema:\n
      \n
    1. {table: set(cols)}
    2. \n
    3. {db: {table: set(cols)}}
    4. \n
    5. {catalog: {db: {table: set(cols)}}}
    6. \n
  • \n
  • dialect: The dialect to be used for custom type mappings & parsing string arguments.
  • \n
  • normalize: Whether to normalize identifier names according to the given dialect or not.
  • \n
\n", "bases": "sqlglot.schema.AbstractMappingSchema[typing.Dict[str, str]], Schema"}, "sqlglot.schema.MappingSchema.__init__": {"fullname": "sqlglot.schema.MappingSchema.__init__", "modulename": "sqlglot.schema", "qualname": "MappingSchema.__init__", "kind": "function", "doc": "

\n", "signature": "(\tschema: Optional[Dict] = None,\tvisible: Optional[Dict] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tnormalize: bool = True)"}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"fullname": "sqlglot.schema.MappingSchema.from_mapping_schema", "modulename": "sqlglot.schema", "qualname": "MappingSchema.from_mapping_schema", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tmapping_schema: sqlglot.schema.MappingSchema) -> sqlglot.schema.MappingSchema:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.copy": {"fullname": "sqlglot.schema.MappingSchema.copy", "modulename": "sqlglot.schema", "qualname": "MappingSchema.copy", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> sqlglot.schema.MappingSchema:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.add_table": {"fullname": "sqlglot.schema.MappingSchema.add_table", "modulename": "sqlglot.schema", "qualname": "MappingSchema.add_table", "kind": "function", "doc": "

Register or update a table. Updates are only performed if a new column mapping is provided.

\n\n
Arguments:
\n\n
    \n
  • table: the Table expression instance or string representing the table.
  • \n
  • column_mapping: a column mapping that describes the structure of the table.
  • \n
  • dialect: the SQL dialect that will be used to parse table if it's a string.
  • \n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn_mapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> None:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.column_names": {"fullname": "sqlglot.schema.MappingSchema.column_names", "modulename": "sqlglot.schema", "qualname": "MappingSchema.column_names", "kind": "function", "doc": "

Get the column names for a table.

\n\n
Arguments:
\n\n
    \n
  • table: the Table expression instance.
  • \n
  • only_visible: whether to include invisible columns.
  • \n
  • dialect: the SQL dialect that will be used to parse table if it's a string.
  • \n
\n\n
Returns:
\n\n
\n

The list of column names.

\n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tonly_visible: bool = False,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> List[str]:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.get_column_type": {"fullname": "sqlglot.schema.MappingSchema.get_column_type", "modulename": "sqlglot.schema", "qualname": "MappingSchema.get_column_type", "kind": "function", "doc": "

Get the sqlglot.exp.DataType type of a column in the schema.

\n\n
Arguments:
\n\n
    \n
  • table: the source table.
  • \n
  • column: the target column.
  • \n
  • dialect: the SQL dialect that will be used to parse table if it's a string.
  • \n
\n\n
Returns:
\n\n
\n

The resulting column type.

\n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn: sqlglot.expressions.Column,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> sqlglot.expressions.DataType:", "funcdef": "def"}, "sqlglot.schema.ensure_schema": {"fullname": "sqlglot.schema.ensure_schema", "modulename": "sqlglot.schema", "qualname": "ensure_schema", "kind": "function", "doc": "

\n", "signature": "(\tschema: Union[sqlglot.schema.Schema, Dict, NoneType],\t**kwargs: Any) -> sqlglot.schema.Schema:", "funcdef": "def"}, "sqlglot.schema.ensure_column_mapping": {"fullname": "sqlglot.schema.ensure_column_mapping", "modulename": "sqlglot.schema", "qualname": "ensure_column_mapping", "kind": "function", "doc": "

\n", "signature": "(\tmapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType]) -> Dict:", "funcdef": "def"}, "sqlglot.schema.flatten_schema": {"fullname": "sqlglot.schema.flatten_schema", "modulename": "sqlglot.schema", "qualname": "flatten_schema", "kind": "function", "doc": "

\n", "signature": "(\tschema: Dict,\tdepth: int,\tkeys: Optional[List[str]] = None) -> List[List[str]]:", "funcdef": "def"}, "sqlglot.schema.nested_get": {"fullname": "sqlglot.schema.nested_get", "modulename": "sqlglot.schema", "qualname": "nested_get", "kind": "function", "doc": "

Get a value for a nested dictionary.

\n\n
Arguments:
\n\n
    \n
  • d: the dictionary to search.
  • \n
  • *path: tuples of (name, key), where:\nkey is the key in the dictionary to get.\nname is a string to use in the error if key isn't found.
  • \n
\n\n
Returns:
\n\n
\n

The value or None if it doesn't exist.

\n
\n", "signature": "(\td: Dict,\t*path: Tuple[str, str],\traise_on_missing: bool = True) -> Optional[Any]:", "funcdef": "def"}, "sqlglot.schema.nested_set": {"fullname": "sqlglot.schema.nested_set", "modulename": "sqlglot.schema", "qualname": "nested_set", "kind": "function", "doc": "

In-place set a value for a nested dictionary

\n\n
Example:
\n\n
\n
\n
>>> nested_set({}, ["top_key", "second_key"], "value")\n{'top_key': {'second_key': 'value'}}\n
\n
\n \n
\n
>>> nested_set({"top_key": {"third_key": "third_value"}}, ["top_key", "second_key"], "value")\n{'top_key': {'third_key': 'third_value', 'second_key': 'value'}}\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • d: dictionary to update.
  • \n
  • keys: the keys that make up the path to value.
  • \n
  • value: the value to set in the dictionary for the given key path.
  • \n
\n\n
Returns:
\n\n
\n

The (possibly) updated dictionary.

\n
\n", "signature": "(d: Dict, keys: Sequence[str], value: Any) -> Dict:", "funcdef": "def"}, "sqlglot.serde": {"fullname": "sqlglot.serde", "modulename": "sqlglot.serde", "kind": "module", "doc": "

\n"}, "sqlglot.serde.dump": {"fullname": "sqlglot.serde.dump", "modulename": "sqlglot.serde", "qualname": "dump", "kind": "function", "doc": "

Recursively dump an AST into a JSON-serializable dict.

\n", "signature": "(\tnode: Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool, NoneType]) -> Union[dict, list, str, float, int, bool, NoneType]:", "funcdef": "def"}, "sqlglot.serde.load": {"fullname": "sqlglot.serde.load", "modulename": "sqlglot.serde", "qualname": "load", "kind": "function", "doc": "

Recursively load a dict (as returned by dump) into an AST.

\n", "signature": "(\tobj: Union[dict, list, str, float, int, bool, NoneType]) -> Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool, NoneType]:", "funcdef": "def"}, "sqlglot.time": {"fullname": "sqlglot.time", "modulename": "sqlglot.time", "kind": "module", "doc": "

\n"}, "sqlglot.time.format_time": {"fullname": "sqlglot.time.format_time", "modulename": "sqlglot.time", "qualname": "format_time", "kind": "function", "doc": "

Converts a time string given a mapping.

\n\n
Examples:
\n\n
\n
\n
>>> format_time("%Y", {"%Y": "YYYY"})\n'YYYY'\n
\n
\n \n

Args:\n string: the time string to convert.\n mapping: dictionary of time format to target time format.\n trie: optional trie, can be passed in for performance.

\n \n

Returns:\n The converted time string.

\n
\n", "signature": "(\tstring: str,\tmapping: Dict[str, str],\ttrie: Optional[Dict] = None) -> Optional[str]:", "funcdef": "def"}, "sqlglot.tokens": {"fullname": "sqlglot.tokens", "modulename": "sqlglot.tokens", "kind": "module", "doc": "

\n"}, "sqlglot.tokens.TokenType": {"fullname": "sqlglot.tokens.TokenType", "modulename": "sqlglot.tokens", "qualname": "TokenType", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.tokens.TokenType.L_PAREN": {"fullname": "sqlglot.tokens.TokenType.L_PAREN", "modulename": "sqlglot.tokens", "qualname": "TokenType.L_PAREN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.L_PAREN: 'L_PAREN'>"}, "sqlglot.tokens.TokenType.R_PAREN": {"fullname": "sqlglot.tokens.TokenType.R_PAREN", "modulename": "sqlglot.tokens", "qualname": "TokenType.R_PAREN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.R_PAREN: 'R_PAREN'>"}, "sqlglot.tokens.TokenType.L_BRACKET": {"fullname": "sqlglot.tokens.TokenType.L_BRACKET", "modulename": "sqlglot.tokens", "qualname": "TokenType.L_BRACKET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.L_BRACKET: 'L_BRACKET'>"}, "sqlglot.tokens.TokenType.R_BRACKET": {"fullname": "sqlglot.tokens.TokenType.R_BRACKET", "modulename": "sqlglot.tokens", "qualname": "TokenType.R_BRACKET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.R_BRACKET: 'R_BRACKET'>"}, "sqlglot.tokens.TokenType.L_BRACE": {"fullname": "sqlglot.tokens.TokenType.L_BRACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.L_BRACE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.L_BRACE: 'L_BRACE'>"}, "sqlglot.tokens.TokenType.R_BRACE": {"fullname": "sqlglot.tokens.TokenType.R_BRACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.R_BRACE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.R_BRACE: 'R_BRACE'>"}, "sqlglot.tokens.TokenType.COMMA": {"fullname": "sqlglot.tokens.TokenType.COMMA", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMA", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COMMA: 'COMMA'>"}, "sqlglot.tokens.TokenType.DOT": {"fullname": "sqlglot.tokens.TokenType.DOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.DOT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DOT: 'DOT'>"}, "sqlglot.tokens.TokenType.DASH": {"fullname": "sqlglot.tokens.TokenType.DASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.DASH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DASH: 'DASH'>"}, "sqlglot.tokens.TokenType.PLUS": {"fullname": "sqlglot.tokens.TokenType.PLUS", "modulename": "sqlglot.tokens", "qualname": "TokenType.PLUS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PLUS: 'PLUS'>"}, "sqlglot.tokens.TokenType.COLON": {"fullname": "sqlglot.tokens.TokenType.COLON", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLON", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COLON: 'COLON'>"}, "sqlglot.tokens.TokenType.DCOLON": {"fullname": "sqlglot.tokens.TokenType.DCOLON", "modulename": "sqlglot.tokens", "qualname": "TokenType.DCOLON", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DCOLON: 'DCOLON'>"}, "sqlglot.tokens.TokenType.SEMICOLON": {"fullname": "sqlglot.tokens.TokenType.SEMICOLON", "modulename": "sqlglot.tokens", "qualname": "TokenType.SEMICOLON", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SEMICOLON: 'SEMICOLON'>"}, "sqlglot.tokens.TokenType.STAR": {"fullname": "sqlglot.tokens.TokenType.STAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.STAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.STAR: 'STAR'>"}, "sqlglot.tokens.TokenType.BACKSLASH": {"fullname": "sqlglot.tokens.TokenType.BACKSLASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.BACKSLASH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BACKSLASH: 'BACKSLASH'>"}, "sqlglot.tokens.TokenType.SLASH": {"fullname": "sqlglot.tokens.TokenType.SLASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.SLASH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SLASH: 'SLASH'>"}, "sqlglot.tokens.TokenType.LT": {"fullname": "sqlglot.tokens.TokenType.LT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LT: 'LT'>"}, "sqlglot.tokens.TokenType.LTE": {"fullname": "sqlglot.tokens.TokenType.LTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.LTE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LTE: 'LTE'>"}, "sqlglot.tokens.TokenType.GT": {"fullname": "sqlglot.tokens.TokenType.GT", "modulename": "sqlglot.tokens", "qualname": "TokenType.GT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GT: 'GT'>"}, "sqlglot.tokens.TokenType.GTE": {"fullname": "sqlglot.tokens.TokenType.GTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.GTE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GTE: 'GTE'>"}, "sqlglot.tokens.TokenType.NOT": {"fullname": "sqlglot.tokens.TokenType.NOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.NOT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NOT: 'NOT'>"}, "sqlglot.tokens.TokenType.EQ": {"fullname": "sqlglot.tokens.TokenType.EQ", "modulename": "sqlglot.tokens", "qualname": "TokenType.EQ", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.EQ: 'EQ'>"}, "sqlglot.tokens.TokenType.NEQ": {"fullname": "sqlglot.tokens.TokenType.NEQ", "modulename": "sqlglot.tokens", "qualname": "TokenType.NEQ", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NEQ: 'NEQ'>"}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"fullname": "sqlglot.tokens.TokenType.NULLSAFE_EQ", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULLSAFE_EQ", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>"}, "sqlglot.tokens.TokenType.AND": {"fullname": "sqlglot.tokens.TokenType.AND", "modulename": "sqlglot.tokens", "qualname": "TokenType.AND", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.AND: 'AND'>"}, "sqlglot.tokens.TokenType.OR": {"fullname": "sqlglot.tokens.TokenType.OR", "modulename": "sqlglot.tokens", "qualname": "TokenType.OR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OR: 'OR'>"}, "sqlglot.tokens.TokenType.AMP": {"fullname": "sqlglot.tokens.TokenType.AMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.AMP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.AMP: 'AMP'>"}, "sqlglot.tokens.TokenType.DPIPE": {"fullname": "sqlglot.tokens.TokenType.DPIPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DPIPE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DPIPE: 'DPIPE'>"}, "sqlglot.tokens.TokenType.PIPE": {"fullname": "sqlglot.tokens.TokenType.PIPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.PIPE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PIPE: 'PIPE'>"}, "sqlglot.tokens.TokenType.CARET": {"fullname": "sqlglot.tokens.TokenType.CARET", "modulename": "sqlglot.tokens", "qualname": "TokenType.CARET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CARET: 'CARET'>"}, "sqlglot.tokens.TokenType.TILDA": {"fullname": "sqlglot.tokens.TokenType.TILDA", "modulename": "sqlglot.tokens", "qualname": "TokenType.TILDA", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TILDA: 'TILDA'>"}, "sqlglot.tokens.TokenType.ARROW": {"fullname": "sqlglot.tokens.TokenType.ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.ARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ARROW: 'ARROW'>"}, "sqlglot.tokens.TokenType.DARROW": {"fullname": "sqlglot.tokens.TokenType.DARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.DARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DARROW: 'DARROW'>"}, "sqlglot.tokens.TokenType.FARROW": {"fullname": "sqlglot.tokens.TokenType.FARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.FARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FARROW: 'FARROW'>"}, "sqlglot.tokens.TokenType.HASH": {"fullname": "sqlglot.tokens.TokenType.HASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.HASH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HASH: 'HASH'>"}, "sqlglot.tokens.TokenType.HASH_ARROW": {"fullname": "sqlglot.tokens.TokenType.HASH_ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.HASH_ARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HASH_ARROW: 'HASH_ARROW'>"}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"fullname": "sqlglot.tokens.TokenType.DHASH_ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.DHASH_ARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DHASH_ARROW: 'DHASH_ARROW'>"}, "sqlglot.tokens.TokenType.LR_ARROW": {"fullname": "sqlglot.tokens.TokenType.LR_ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.LR_ARROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LR_ARROW: 'LR_ARROW'>"}, "sqlglot.tokens.TokenType.LT_AT": {"fullname": "sqlglot.tokens.TokenType.LT_AT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LT_AT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LT_AT: 'LT_AT'>"}, "sqlglot.tokens.TokenType.AT_GT": {"fullname": "sqlglot.tokens.TokenType.AT_GT", "modulename": "sqlglot.tokens", "qualname": "TokenType.AT_GT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.AT_GT: 'AT_GT'>"}, "sqlglot.tokens.TokenType.DOLLAR": {"fullname": "sqlglot.tokens.TokenType.DOLLAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.DOLLAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DOLLAR: 'DOLLAR'>"}, "sqlglot.tokens.TokenType.PARAMETER": {"fullname": "sqlglot.tokens.TokenType.PARAMETER", "modulename": "sqlglot.tokens", "qualname": "TokenType.PARAMETER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PARAMETER: 'PARAMETER'>"}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"fullname": "sqlglot.tokens.TokenType.SESSION_PARAMETER", "modulename": "sqlglot.tokens", "qualname": "TokenType.SESSION_PARAMETER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SESSION_PARAMETER: 'SESSION_PARAMETER'>"}, "sqlglot.tokens.TokenType.DAMP": {"fullname": "sqlglot.tokens.TokenType.DAMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.DAMP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DAMP: 'DAMP'>"}, "sqlglot.tokens.TokenType.BLOCK_START": {"fullname": "sqlglot.tokens.TokenType.BLOCK_START", "modulename": "sqlglot.tokens", "qualname": "TokenType.BLOCK_START", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BLOCK_START: 'BLOCK_START'>"}, "sqlglot.tokens.TokenType.BLOCK_END": {"fullname": "sqlglot.tokens.TokenType.BLOCK_END", "modulename": "sqlglot.tokens", "qualname": "TokenType.BLOCK_END", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BLOCK_END: 'BLOCK_END'>"}, "sqlglot.tokens.TokenType.SPACE": {"fullname": "sqlglot.tokens.TokenType.SPACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.SPACE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SPACE: 'SPACE'>"}, "sqlglot.tokens.TokenType.BREAK": {"fullname": "sqlglot.tokens.TokenType.BREAK", "modulename": "sqlglot.tokens", "qualname": "TokenType.BREAK", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BREAK: 'BREAK'>"}, "sqlglot.tokens.TokenType.STRING": {"fullname": "sqlglot.tokens.TokenType.STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.STRING: 'STRING'>"}, "sqlglot.tokens.TokenType.NUMBER": {"fullname": "sqlglot.tokens.TokenType.NUMBER", "modulename": "sqlglot.tokens", "qualname": "TokenType.NUMBER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NUMBER: 'NUMBER'>"}, "sqlglot.tokens.TokenType.IDENTIFIER": {"fullname": "sqlglot.tokens.TokenType.IDENTIFIER", "modulename": "sqlglot.tokens", "qualname": "TokenType.IDENTIFIER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IDENTIFIER: 'IDENTIFIER'>"}, "sqlglot.tokens.TokenType.DATABASE": {"fullname": "sqlglot.tokens.TokenType.DATABASE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATABASE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATABASE: 'DATABASE'>"}, "sqlglot.tokens.TokenType.COLUMN": {"fullname": "sqlglot.tokens.TokenType.COLUMN", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLUMN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COLUMN: 'COLUMN'>"}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"fullname": "sqlglot.tokens.TokenType.COLUMN_DEF", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLUMN_DEF", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COLUMN_DEF: 'COLUMN_DEF'>"}, "sqlglot.tokens.TokenType.SCHEMA": {"fullname": "sqlglot.tokens.TokenType.SCHEMA", "modulename": "sqlglot.tokens", "qualname": "TokenType.SCHEMA", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SCHEMA: 'SCHEMA'>"}, "sqlglot.tokens.TokenType.TABLE": {"fullname": "sqlglot.tokens.TokenType.TABLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TABLE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TABLE: 'TABLE'>"}, "sqlglot.tokens.TokenType.VAR": {"fullname": "sqlglot.tokens.TokenType.VAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.VAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VAR: 'VAR'>"}, "sqlglot.tokens.TokenType.BIT_STRING": {"fullname": "sqlglot.tokens.TokenType.BIT_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIT_STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BIT_STRING: 'BIT_STRING'>"}, "sqlglot.tokens.TokenType.HEX_STRING": {"fullname": "sqlglot.tokens.TokenType.HEX_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.HEX_STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HEX_STRING: 'HEX_STRING'>"}, "sqlglot.tokens.TokenType.BYTE_STRING": {"fullname": "sqlglot.tokens.TokenType.BYTE_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.BYTE_STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BYTE_STRING: 'BYTE_STRING'>"}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"fullname": "sqlglot.tokens.TokenType.NATIONAL_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.NATIONAL_STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NATIONAL_STRING: 'NATIONAL_STRING'>"}, "sqlglot.tokens.TokenType.RAW_STRING": {"fullname": "sqlglot.tokens.TokenType.RAW_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.RAW_STRING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RAW_STRING: 'RAW_STRING'>"}, "sqlglot.tokens.TokenType.BIT": {"fullname": "sqlglot.tokens.TokenType.BIT", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BIT: 'BIT'>"}, "sqlglot.tokens.TokenType.BOOLEAN": {"fullname": "sqlglot.tokens.TokenType.BOOLEAN", "modulename": "sqlglot.tokens", "qualname": "TokenType.BOOLEAN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BOOLEAN: 'BOOLEAN'>"}, "sqlglot.tokens.TokenType.TINYINT": {"fullname": "sqlglot.tokens.TokenType.TINYINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.TINYINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TINYINT: 'TINYINT'>"}, "sqlglot.tokens.TokenType.UTINYINT": {"fullname": "sqlglot.tokens.TokenType.UTINYINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.UTINYINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UTINYINT: 'UTINYINT'>"}, "sqlglot.tokens.TokenType.SMALLINT": {"fullname": "sqlglot.tokens.TokenType.SMALLINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.SMALLINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SMALLINT: 'SMALLINT'>"}, "sqlglot.tokens.TokenType.USMALLINT": {"fullname": "sqlglot.tokens.TokenType.USMALLINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.USMALLINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.USMALLINT: 'USMALLINT'>"}, "sqlglot.tokens.TokenType.INT": {"fullname": "sqlglot.tokens.TokenType.INT", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT: 'INT'>"}, "sqlglot.tokens.TokenType.UINT": {"fullname": "sqlglot.tokens.TokenType.UINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.UINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UINT: 'UINT'>"}, "sqlglot.tokens.TokenType.BIGINT": {"fullname": "sqlglot.tokens.TokenType.BIGINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIGINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BIGINT: 'BIGINT'>"}, "sqlglot.tokens.TokenType.UBIGINT": {"fullname": "sqlglot.tokens.TokenType.UBIGINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.UBIGINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UBIGINT: 'UBIGINT'>"}, "sqlglot.tokens.TokenType.INT128": {"fullname": "sqlglot.tokens.TokenType.INT128", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT128", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT128: 'INT128'>"}, "sqlglot.tokens.TokenType.UINT128": {"fullname": "sqlglot.tokens.TokenType.UINT128", "modulename": "sqlglot.tokens", "qualname": "TokenType.UINT128", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UINT128: 'UINT128'>"}, "sqlglot.tokens.TokenType.INT256": {"fullname": "sqlglot.tokens.TokenType.INT256", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT256", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT256: 'INT256'>"}, "sqlglot.tokens.TokenType.UINT256": {"fullname": "sqlglot.tokens.TokenType.UINT256", "modulename": "sqlglot.tokens", "qualname": "TokenType.UINT256", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UINT256: 'UINT256'>"}, "sqlglot.tokens.TokenType.FLOAT": {"fullname": "sqlglot.tokens.TokenType.FLOAT", "modulename": "sqlglot.tokens", "qualname": "TokenType.FLOAT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FLOAT: 'FLOAT'>"}, "sqlglot.tokens.TokenType.DOUBLE": {"fullname": "sqlglot.tokens.TokenType.DOUBLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DOUBLE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DOUBLE: 'DOUBLE'>"}, "sqlglot.tokens.TokenType.DECIMAL": {"fullname": "sqlglot.tokens.TokenType.DECIMAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.DECIMAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DECIMAL: 'DECIMAL'>"}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"fullname": "sqlglot.tokens.TokenType.BIGDECIMAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIGDECIMAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BIGDECIMAL: 'BIGDECIMAL'>"}, "sqlglot.tokens.TokenType.CHAR": {"fullname": "sqlglot.tokens.TokenType.CHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.CHAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CHAR: 'CHAR'>"}, "sqlglot.tokens.TokenType.NCHAR": {"fullname": "sqlglot.tokens.TokenType.NCHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.NCHAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NCHAR: 'NCHAR'>"}, "sqlglot.tokens.TokenType.VARCHAR": {"fullname": "sqlglot.tokens.TokenType.VARCHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.VARCHAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VARCHAR: 'VARCHAR'>"}, "sqlglot.tokens.TokenType.NVARCHAR": {"fullname": "sqlglot.tokens.TokenType.NVARCHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.NVARCHAR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NVARCHAR: 'NVARCHAR'>"}, "sqlglot.tokens.TokenType.TEXT": {"fullname": "sqlglot.tokens.TokenType.TEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.TEXT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TEXT: 'TEXT'>"}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"fullname": "sqlglot.tokens.TokenType.MEDIUMTEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.MEDIUMTEXT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>"}, "sqlglot.tokens.TokenType.LONGTEXT": {"fullname": "sqlglot.tokens.TokenType.LONGTEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LONGTEXT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LONGTEXT: 'LONGTEXT'>"}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"fullname": "sqlglot.tokens.TokenType.MEDIUMBLOB", "modulename": "sqlglot.tokens", "qualname": "TokenType.MEDIUMBLOB", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>"}, "sqlglot.tokens.TokenType.LONGBLOB": {"fullname": "sqlglot.tokens.TokenType.LONGBLOB", "modulename": "sqlglot.tokens", "qualname": "TokenType.LONGBLOB", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LONGBLOB: 'LONGBLOB'>"}, "sqlglot.tokens.TokenType.BINARY": {"fullname": "sqlglot.tokens.TokenType.BINARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.BINARY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BINARY: 'BINARY'>"}, "sqlglot.tokens.TokenType.VARBINARY": {"fullname": "sqlglot.tokens.TokenType.VARBINARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.VARBINARY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VARBINARY: 'VARBINARY'>"}, "sqlglot.tokens.TokenType.JSON": {"fullname": "sqlglot.tokens.TokenType.JSON", "modulename": "sqlglot.tokens", "qualname": "TokenType.JSON", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.JSON: 'JSON'>"}, "sqlglot.tokens.TokenType.JSONB": {"fullname": "sqlglot.tokens.TokenType.JSONB", "modulename": "sqlglot.tokens", "qualname": "TokenType.JSONB", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.JSONB: 'JSONB'>"}, "sqlglot.tokens.TokenType.TIME": {"fullname": "sqlglot.tokens.TokenType.TIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIME", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TIME: 'TIME'>"}, "sqlglot.tokens.TokenType.TIMESTAMP": {"fullname": "sqlglot.tokens.TokenType.TIMESTAMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIMESTAMP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TIMESTAMP: 'TIMESTAMP'>"}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"fullname": "sqlglot.tokens.TokenType.TIMESTAMPTZ", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIMESTAMPTZ", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>"}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"fullname": "sqlglot.tokens.TokenType.TIMESTAMPLTZ", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIMESTAMPLTZ", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>"}, "sqlglot.tokens.TokenType.DATETIME": {"fullname": "sqlglot.tokens.TokenType.DATETIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATETIME", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATETIME: 'DATETIME'>"}, "sqlglot.tokens.TokenType.DATETIME64": {"fullname": "sqlglot.tokens.TokenType.DATETIME64", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATETIME64", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATETIME64: 'DATETIME64'>"}, "sqlglot.tokens.TokenType.DATE": {"fullname": "sqlglot.tokens.TokenType.DATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATE: 'DATE'>"}, "sqlglot.tokens.TokenType.INT4RANGE": {"fullname": "sqlglot.tokens.TokenType.INT4RANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT4RANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT4RANGE: 'INT4RANGE'>"}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.INT4MULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT4MULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>"}, "sqlglot.tokens.TokenType.INT8RANGE": {"fullname": "sqlglot.tokens.TokenType.INT8RANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT8RANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT8RANGE: 'INT8RANGE'>"}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.INT8MULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT8MULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>"}, "sqlglot.tokens.TokenType.NUMRANGE": {"fullname": "sqlglot.tokens.TokenType.NUMRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.NUMRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NUMRANGE: 'NUMRANGE'>"}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.NUMMULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.NUMMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>"}, "sqlglot.tokens.TokenType.TSRANGE": {"fullname": "sqlglot.tokens.TokenType.TSRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TSRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TSRANGE: 'TSRANGE'>"}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.TSMULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TSMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>"}, "sqlglot.tokens.TokenType.TSTZRANGE": {"fullname": "sqlglot.tokens.TokenType.TSTZRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TSTZRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TSTZRANGE: 'TSTZRANGE'>"}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.TSTZMULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TSTZMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>"}, "sqlglot.tokens.TokenType.DATERANGE": {"fullname": "sqlglot.tokens.TokenType.DATERANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATERANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATERANGE: 'DATERANGE'>"}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"fullname": "sqlglot.tokens.TokenType.DATEMULTIRANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATEMULTIRANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>"}, "sqlglot.tokens.TokenType.UUID": {"fullname": "sqlglot.tokens.TokenType.UUID", "modulename": "sqlglot.tokens", "qualname": "TokenType.UUID", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UUID: 'UUID'>"}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"fullname": "sqlglot.tokens.TokenType.GEOGRAPHY", "modulename": "sqlglot.tokens", "qualname": "TokenType.GEOGRAPHY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GEOGRAPHY: 'GEOGRAPHY'>"}, "sqlglot.tokens.TokenType.NULLABLE": {"fullname": "sqlglot.tokens.TokenType.NULLABLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULLABLE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NULLABLE: 'NULLABLE'>"}, "sqlglot.tokens.TokenType.GEOMETRY": {"fullname": "sqlglot.tokens.TokenType.GEOMETRY", "modulename": "sqlglot.tokens", "qualname": "TokenType.GEOMETRY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GEOMETRY: 'GEOMETRY'>"}, "sqlglot.tokens.TokenType.HLLSKETCH": {"fullname": "sqlglot.tokens.TokenType.HLLSKETCH", "modulename": "sqlglot.tokens", "qualname": "TokenType.HLLSKETCH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HLLSKETCH: 'HLLSKETCH'>"}, "sqlglot.tokens.TokenType.HSTORE": {"fullname": "sqlglot.tokens.TokenType.HSTORE", "modulename": "sqlglot.tokens", "qualname": "TokenType.HSTORE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HSTORE: 'HSTORE'>"}, "sqlglot.tokens.TokenType.SUPER": {"fullname": "sqlglot.tokens.TokenType.SUPER", "modulename": "sqlglot.tokens", "qualname": "TokenType.SUPER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SUPER: 'SUPER'>"}, "sqlglot.tokens.TokenType.SERIAL": {"fullname": "sqlglot.tokens.TokenType.SERIAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.SERIAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SERIAL: 'SERIAL'>"}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"fullname": "sqlglot.tokens.TokenType.SMALLSERIAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.SMALLSERIAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SMALLSERIAL: 'SMALLSERIAL'>"}, "sqlglot.tokens.TokenType.BIGSERIAL": {"fullname": "sqlglot.tokens.TokenType.BIGSERIAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIGSERIAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BIGSERIAL: 'BIGSERIAL'>"}, "sqlglot.tokens.TokenType.XML": {"fullname": "sqlglot.tokens.TokenType.XML", "modulename": "sqlglot.tokens", "qualname": "TokenType.XML", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.XML: 'XML'>"}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"fullname": "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNIQUEIDENTIFIER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>"}, "sqlglot.tokens.TokenType.MONEY": {"fullname": "sqlglot.tokens.TokenType.MONEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.MONEY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MONEY: 'MONEY'>"}, "sqlglot.tokens.TokenType.SMALLMONEY": {"fullname": "sqlglot.tokens.TokenType.SMALLMONEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.SMALLMONEY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SMALLMONEY: 'SMALLMONEY'>"}, "sqlglot.tokens.TokenType.ROWVERSION": {"fullname": "sqlglot.tokens.TokenType.ROWVERSION", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROWVERSION", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ROWVERSION: 'ROWVERSION'>"}, "sqlglot.tokens.TokenType.IMAGE": {"fullname": "sqlglot.tokens.TokenType.IMAGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.IMAGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IMAGE: 'IMAGE'>"}, "sqlglot.tokens.TokenType.VARIANT": {"fullname": "sqlglot.tokens.TokenType.VARIANT", "modulename": "sqlglot.tokens", "qualname": "TokenType.VARIANT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VARIANT: 'VARIANT'>"}, "sqlglot.tokens.TokenType.OBJECT": {"fullname": "sqlglot.tokens.TokenType.OBJECT", "modulename": "sqlglot.tokens", "qualname": "TokenType.OBJECT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OBJECT: 'OBJECT'>"}, "sqlglot.tokens.TokenType.INET": {"fullname": "sqlglot.tokens.TokenType.INET", "modulename": "sqlglot.tokens", "qualname": "TokenType.INET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INET: 'INET'>"}, "sqlglot.tokens.TokenType.ENUM": {"fullname": "sqlglot.tokens.TokenType.ENUM", "modulename": "sqlglot.tokens", "qualname": "TokenType.ENUM", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ENUM: 'ENUM'>"}, "sqlglot.tokens.TokenType.ALIAS": {"fullname": "sqlglot.tokens.TokenType.ALIAS", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALIAS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ALIAS: 'ALIAS'>"}, "sqlglot.tokens.TokenType.ALTER": {"fullname": "sqlglot.tokens.TokenType.ALTER", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALTER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ALTER: 'ALTER'>"}, "sqlglot.tokens.TokenType.ALWAYS": {"fullname": "sqlglot.tokens.TokenType.ALWAYS", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALWAYS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ALWAYS: 'ALWAYS'>"}, "sqlglot.tokens.TokenType.ALL": {"fullname": "sqlglot.tokens.TokenType.ALL", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ALL: 'ALL'>"}, "sqlglot.tokens.TokenType.ANTI": {"fullname": "sqlglot.tokens.TokenType.ANTI", "modulename": "sqlglot.tokens", "qualname": "TokenType.ANTI", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ANTI: 'ANTI'>"}, "sqlglot.tokens.TokenType.ANY": {"fullname": "sqlglot.tokens.TokenType.ANY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ANY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ANY: 'ANY'>"}, "sqlglot.tokens.TokenType.APPLY": {"fullname": "sqlglot.tokens.TokenType.APPLY", "modulename": "sqlglot.tokens", "qualname": "TokenType.APPLY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.APPLY: 'APPLY'>"}, "sqlglot.tokens.TokenType.ARRAY": {"fullname": "sqlglot.tokens.TokenType.ARRAY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ARRAY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ARRAY: 'ARRAY'>"}, "sqlglot.tokens.TokenType.ASC": {"fullname": "sqlglot.tokens.TokenType.ASC", "modulename": "sqlglot.tokens", "qualname": "TokenType.ASC", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ASC: 'ASC'>"}, "sqlglot.tokens.TokenType.ASOF": {"fullname": "sqlglot.tokens.TokenType.ASOF", "modulename": "sqlglot.tokens", "qualname": "TokenType.ASOF", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ASOF: 'ASOF'>"}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"fullname": "sqlglot.tokens.TokenType.AUTO_INCREMENT", "modulename": "sqlglot.tokens", "qualname": "TokenType.AUTO_INCREMENT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>"}, "sqlglot.tokens.TokenType.BEGIN": {"fullname": "sqlglot.tokens.TokenType.BEGIN", "modulename": "sqlglot.tokens", "qualname": "TokenType.BEGIN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BEGIN: 'BEGIN'>"}, "sqlglot.tokens.TokenType.BETWEEN": {"fullname": "sqlglot.tokens.TokenType.BETWEEN", "modulename": "sqlglot.tokens", "qualname": "TokenType.BETWEEN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.BETWEEN: 'BETWEEN'>"}, "sqlglot.tokens.TokenType.CACHE": {"fullname": "sqlglot.tokens.TokenType.CACHE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CACHE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CACHE: 'CACHE'>"}, "sqlglot.tokens.TokenType.CASE": {"fullname": "sqlglot.tokens.TokenType.CASE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CASE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CASE: 'CASE'>"}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"fullname": "sqlglot.tokens.TokenType.CHARACTER_SET", "modulename": "sqlglot.tokens", "qualname": "TokenType.CHARACTER_SET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CHARACTER_SET: 'CHARACTER_SET'>"}, "sqlglot.tokens.TokenType.COLLATE": {"fullname": "sqlglot.tokens.TokenType.COLLATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLLATE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COLLATE: 'COLLATE'>"}, "sqlglot.tokens.TokenType.COMMAND": {"fullname": "sqlglot.tokens.TokenType.COMMAND", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMAND", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COMMAND: 'COMMAND'>"}, "sqlglot.tokens.TokenType.COMMENT": {"fullname": "sqlglot.tokens.TokenType.COMMENT", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMENT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COMMENT: 'COMMENT'>"}, "sqlglot.tokens.TokenType.COMMIT": {"fullname": "sqlglot.tokens.TokenType.COMMIT", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMIT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.COMMIT: 'COMMIT'>"}, "sqlglot.tokens.TokenType.CONSTRAINT": {"fullname": "sqlglot.tokens.TokenType.CONSTRAINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.CONSTRAINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CONSTRAINT: 'CONSTRAINT'>"}, "sqlglot.tokens.TokenType.CREATE": {"fullname": "sqlglot.tokens.TokenType.CREATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CREATE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CREATE: 'CREATE'>"}, "sqlglot.tokens.TokenType.CROSS": {"fullname": "sqlglot.tokens.TokenType.CROSS", "modulename": "sqlglot.tokens", "qualname": "TokenType.CROSS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CROSS: 'CROSS'>"}, "sqlglot.tokens.TokenType.CUBE": {"fullname": "sqlglot.tokens.TokenType.CUBE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CUBE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CUBE: 'CUBE'>"}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"fullname": "sqlglot.tokens.TokenType.CURRENT_DATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_DATE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CURRENT_DATE: 'CURRENT_DATE'>"}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"fullname": "sqlglot.tokens.TokenType.CURRENT_DATETIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_DATETIME", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>"}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"fullname": "sqlglot.tokens.TokenType.CURRENT_TIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_TIME", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CURRENT_TIME: 'CURRENT_TIME'>"}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"fullname": "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_TIMESTAMP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>"}, "sqlglot.tokens.TokenType.CURRENT_USER": {"fullname": "sqlglot.tokens.TokenType.CURRENT_USER", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_USER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.CURRENT_USER: 'CURRENT_USER'>"}, "sqlglot.tokens.TokenType.DEFAULT": {"fullname": "sqlglot.tokens.TokenType.DEFAULT", "modulename": "sqlglot.tokens", "qualname": "TokenType.DEFAULT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DEFAULT: 'DEFAULT'>"}, "sqlglot.tokens.TokenType.DELETE": {"fullname": "sqlglot.tokens.TokenType.DELETE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DELETE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DELETE: 'DELETE'>"}, "sqlglot.tokens.TokenType.DESC": {"fullname": "sqlglot.tokens.TokenType.DESC", "modulename": "sqlglot.tokens", "qualname": "TokenType.DESC", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DESC: 'DESC'>"}, "sqlglot.tokens.TokenType.DESCRIBE": {"fullname": "sqlglot.tokens.TokenType.DESCRIBE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DESCRIBE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DESCRIBE: 'DESCRIBE'>"}, "sqlglot.tokens.TokenType.DICTIONARY": {"fullname": "sqlglot.tokens.TokenType.DICTIONARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.DICTIONARY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DICTIONARY: 'DICTIONARY'>"}, "sqlglot.tokens.TokenType.DISTINCT": {"fullname": "sqlglot.tokens.TokenType.DISTINCT", "modulename": "sqlglot.tokens", "qualname": "TokenType.DISTINCT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DISTINCT: 'DISTINCT'>"}, "sqlglot.tokens.TokenType.DIV": {"fullname": "sqlglot.tokens.TokenType.DIV", "modulename": "sqlglot.tokens", "qualname": "TokenType.DIV", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DIV: 'DIV'>"}, "sqlglot.tokens.TokenType.DROP": {"fullname": "sqlglot.tokens.TokenType.DROP", "modulename": "sqlglot.tokens", "qualname": "TokenType.DROP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.DROP: 'DROP'>"}, "sqlglot.tokens.TokenType.ELSE": {"fullname": "sqlglot.tokens.TokenType.ELSE", "modulename": "sqlglot.tokens", "qualname": "TokenType.ELSE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ELSE: 'ELSE'>"}, "sqlglot.tokens.TokenType.END": {"fullname": "sqlglot.tokens.TokenType.END", "modulename": "sqlglot.tokens", "qualname": "TokenType.END", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.END: 'END'>"}, "sqlglot.tokens.TokenType.ESCAPE": {"fullname": "sqlglot.tokens.TokenType.ESCAPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.ESCAPE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ESCAPE: 'ESCAPE'>"}, "sqlglot.tokens.TokenType.EXCEPT": {"fullname": "sqlglot.tokens.TokenType.EXCEPT", "modulename": "sqlglot.tokens", "qualname": "TokenType.EXCEPT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.EXCEPT: 'EXCEPT'>"}, "sqlglot.tokens.TokenType.EXECUTE": {"fullname": "sqlglot.tokens.TokenType.EXECUTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.EXECUTE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.EXECUTE: 'EXECUTE'>"}, "sqlglot.tokens.TokenType.EXISTS": {"fullname": "sqlglot.tokens.TokenType.EXISTS", "modulename": "sqlglot.tokens", "qualname": "TokenType.EXISTS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.EXISTS: 'EXISTS'>"}, "sqlglot.tokens.TokenType.FALSE": {"fullname": "sqlglot.tokens.TokenType.FALSE", "modulename": "sqlglot.tokens", "qualname": "TokenType.FALSE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FALSE: 'FALSE'>"}, "sqlglot.tokens.TokenType.FETCH": {"fullname": "sqlglot.tokens.TokenType.FETCH", "modulename": "sqlglot.tokens", "qualname": "TokenType.FETCH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FETCH: 'FETCH'>"}, "sqlglot.tokens.TokenType.FILTER": {"fullname": "sqlglot.tokens.TokenType.FILTER", "modulename": "sqlglot.tokens", "qualname": "TokenType.FILTER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FILTER: 'FILTER'>"}, "sqlglot.tokens.TokenType.FINAL": {"fullname": "sqlglot.tokens.TokenType.FINAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.FINAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FINAL: 'FINAL'>"}, "sqlglot.tokens.TokenType.FIRST": {"fullname": "sqlglot.tokens.TokenType.FIRST", "modulename": "sqlglot.tokens", "qualname": "TokenType.FIRST", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FIRST: 'FIRST'>"}, "sqlglot.tokens.TokenType.FOR": {"fullname": "sqlglot.tokens.TokenType.FOR", "modulename": "sqlglot.tokens", "qualname": "TokenType.FOR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FOR: 'FOR'>"}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"fullname": "sqlglot.tokens.TokenType.FOREIGN_KEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.FOREIGN_KEY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>"}, "sqlglot.tokens.TokenType.FORMAT": {"fullname": "sqlglot.tokens.TokenType.FORMAT", "modulename": "sqlglot.tokens", "qualname": "TokenType.FORMAT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FORMAT: 'FORMAT'>"}, "sqlglot.tokens.TokenType.FROM": {"fullname": "sqlglot.tokens.TokenType.FROM", "modulename": "sqlglot.tokens", "qualname": "TokenType.FROM", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FROM: 'FROM'>"}, "sqlglot.tokens.TokenType.FULL": {"fullname": "sqlglot.tokens.TokenType.FULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.FULL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FULL: 'FULL'>"}, "sqlglot.tokens.TokenType.FUNCTION": {"fullname": "sqlglot.tokens.TokenType.FUNCTION", "modulename": "sqlglot.tokens", "qualname": "TokenType.FUNCTION", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.FUNCTION: 'FUNCTION'>"}, "sqlglot.tokens.TokenType.GLOB": {"fullname": "sqlglot.tokens.TokenType.GLOB", "modulename": "sqlglot.tokens", "qualname": "TokenType.GLOB", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GLOB: 'GLOB'>"}, "sqlglot.tokens.TokenType.GLOBAL": {"fullname": "sqlglot.tokens.TokenType.GLOBAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.GLOBAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GLOBAL: 'GLOBAL'>"}, "sqlglot.tokens.TokenType.GROUP_BY": {"fullname": "sqlglot.tokens.TokenType.GROUP_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.GROUP_BY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GROUP_BY: 'GROUP_BY'>"}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"fullname": "sqlglot.tokens.TokenType.GROUPING_SETS", "modulename": "sqlglot.tokens", "qualname": "TokenType.GROUPING_SETS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.GROUPING_SETS: 'GROUPING_SETS'>"}, "sqlglot.tokens.TokenType.HAVING": {"fullname": "sqlglot.tokens.TokenType.HAVING", "modulename": "sqlglot.tokens", "qualname": "TokenType.HAVING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HAVING: 'HAVING'>"}, "sqlglot.tokens.TokenType.HINT": {"fullname": "sqlglot.tokens.TokenType.HINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.HINT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.HINT: 'HINT'>"}, "sqlglot.tokens.TokenType.IF": {"fullname": "sqlglot.tokens.TokenType.IF", "modulename": "sqlglot.tokens", "qualname": "TokenType.IF", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IF: 'IF'>"}, "sqlglot.tokens.TokenType.ILIKE": {"fullname": "sqlglot.tokens.TokenType.ILIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.ILIKE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ILIKE: 'ILIKE'>"}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"fullname": "sqlglot.tokens.TokenType.ILIKE_ANY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ILIKE_ANY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ILIKE_ANY: 'ILIKE_ANY'>"}, "sqlglot.tokens.TokenType.IN": {"fullname": "sqlglot.tokens.TokenType.IN", "modulename": "sqlglot.tokens", "qualname": "TokenType.IN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IN: 'IN'>"}, "sqlglot.tokens.TokenType.INDEX": {"fullname": "sqlglot.tokens.TokenType.INDEX", "modulename": "sqlglot.tokens", "qualname": "TokenType.INDEX", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INDEX: 'INDEX'>"}, "sqlglot.tokens.TokenType.INNER": {"fullname": "sqlglot.tokens.TokenType.INNER", "modulename": "sqlglot.tokens", "qualname": "TokenType.INNER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INNER: 'INNER'>"}, "sqlglot.tokens.TokenType.INSERT": {"fullname": "sqlglot.tokens.TokenType.INSERT", "modulename": "sqlglot.tokens", "qualname": "TokenType.INSERT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INSERT: 'INSERT'>"}, "sqlglot.tokens.TokenType.INTERSECT": {"fullname": "sqlglot.tokens.TokenType.INTERSECT", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTERSECT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INTERSECT: 'INTERSECT'>"}, "sqlglot.tokens.TokenType.INTERVAL": {"fullname": "sqlglot.tokens.TokenType.INTERVAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTERVAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INTERVAL: 'INTERVAL'>"}, "sqlglot.tokens.TokenType.INTO": {"fullname": "sqlglot.tokens.TokenType.INTO", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTO", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INTO: 'INTO'>"}, "sqlglot.tokens.TokenType.INTRODUCER": {"fullname": "sqlglot.tokens.TokenType.INTRODUCER", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTRODUCER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.INTRODUCER: 'INTRODUCER'>"}, "sqlglot.tokens.TokenType.IRLIKE": {"fullname": "sqlglot.tokens.TokenType.IRLIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.IRLIKE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IRLIKE: 'IRLIKE'>"}, "sqlglot.tokens.TokenType.IS": {"fullname": "sqlglot.tokens.TokenType.IS", "modulename": "sqlglot.tokens", "qualname": "TokenType.IS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.IS: 'IS'>"}, "sqlglot.tokens.TokenType.ISNULL": {"fullname": "sqlglot.tokens.TokenType.ISNULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.ISNULL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ISNULL: 'ISNULL'>"}, "sqlglot.tokens.TokenType.JOIN": {"fullname": "sqlglot.tokens.TokenType.JOIN", "modulename": "sqlglot.tokens", "qualname": "TokenType.JOIN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.JOIN: 'JOIN'>"}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"fullname": "sqlglot.tokens.TokenType.JOIN_MARKER", "modulename": "sqlglot.tokens", "qualname": "TokenType.JOIN_MARKER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.JOIN_MARKER: 'JOIN_MARKER'>"}, "sqlglot.tokens.TokenType.KEEP": {"fullname": "sqlglot.tokens.TokenType.KEEP", "modulename": "sqlglot.tokens", "qualname": "TokenType.KEEP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.KEEP: 'KEEP'>"}, "sqlglot.tokens.TokenType.LANGUAGE": {"fullname": "sqlglot.tokens.TokenType.LANGUAGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.LANGUAGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LANGUAGE: 'LANGUAGE'>"}, "sqlglot.tokens.TokenType.LATERAL": {"fullname": "sqlglot.tokens.TokenType.LATERAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.LATERAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LATERAL: 'LATERAL'>"}, "sqlglot.tokens.TokenType.LEFT": {"fullname": "sqlglot.tokens.TokenType.LEFT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LEFT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LEFT: 'LEFT'>"}, "sqlglot.tokens.TokenType.LIKE": {"fullname": "sqlglot.tokens.TokenType.LIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.LIKE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LIKE: 'LIKE'>"}, "sqlglot.tokens.TokenType.LIKE_ANY": {"fullname": "sqlglot.tokens.TokenType.LIKE_ANY", "modulename": "sqlglot.tokens", "qualname": "TokenType.LIKE_ANY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LIKE_ANY: 'LIKE_ANY'>"}, "sqlglot.tokens.TokenType.LIMIT": {"fullname": "sqlglot.tokens.TokenType.LIMIT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LIMIT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LIMIT: 'LIMIT'>"}, "sqlglot.tokens.TokenType.LOAD": {"fullname": "sqlglot.tokens.TokenType.LOAD", "modulename": "sqlglot.tokens", "qualname": "TokenType.LOAD", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LOAD: 'LOAD'>"}, "sqlglot.tokens.TokenType.LOCK": {"fullname": "sqlglot.tokens.TokenType.LOCK", "modulename": "sqlglot.tokens", "qualname": "TokenType.LOCK", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.LOCK: 'LOCK'>"}, "sqlglot.tokens.TokenType.MAP": {"fullname": "sqlglot.tokens.TokenType.MAP", "modulename": "sqlglot.tokens", "qualname": "TokenType.MAP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MAP: 'MAP'>"}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"fullname": "sqlglot.tokens.TokenType.MATCH_RECOGNIZE", "modulename": "sqlglot.tokens", "qualname": "TokenType.MATCH_RECOGNIZE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>"}, "sqlglot.tokens.TokenType.MERGE": {"fullname": "sqlglot.tokens.TokenType.MERGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.MERGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MERGE: 'MERGE'>"}, "sqlglot.tokens.TokenType.MOD": {"fullname": "sqlglot.tokens.TokenType.MOD", "modulename": "sqlglot.tokens", "qualname": "TokenType.MOD", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.MOD: 'MOD'>"}, "sqlglot.tokens.TokenType.NATURAL": {"fullname": "sqlglot.tokens.TokenType.NATURAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.NATURAL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NATURAL: 'NATURAL'>"}, "sqlglot.tokens.TokenType.NEXT": {"fullname": "sqlglot.tokens.TokenType.NEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.NEXT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NEXT: 'NEXT'>"}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"fullname": "sqlglot.tokens.TokenType.NEXT_VALUE_FOR", "modulename": "sqlglot.tokens", "qualname": "TokenType.NEXT_VALUE_FOR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NEXT_VALUE_FOR: 'NEXT_VALUE_FOR'>"}, "sqlglot.tokens.TokenType.NOTNULL": {"fullname": "sqlglot.tokens.TokenType.NOTNULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.NOTNULL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NOTNULL: 'NOTNULL'>"}, "sqlglot.tokens.TokenType.NULL": {"fullname": "sqlglot.tokens.TokenType.NULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULL", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.NULL: 'NULL'>"}, "sqlglot.tokens.TokenType.OFFSET": {"fullname": "sqlglot.tokens.TokenType.OFFSET", "modulename": "sqlglot.tokens", "qualname": "TokenType.OFFSET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OFFSET: 'OFFSET'>"}, "sqlglot.tokens.TokenType.ON": {"fullname": "sqlglot.tokens.TokenType.ON", "modulename": "sqlglot.tokens", "qualname": "TokenType.ON", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ON: 'ON'>"}, "sqlglot.tokens.TokenType.ORDER_BY": {"fullname": "sqlglot.tokens.TokenType.ORDER_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ORDER_BY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ORDER_BY: 'ORDER_BY'>"}, "sqlglot.tokens.TokenType.ORDERED": {"fullname": "sqlglot.tokens.TokenType.ORDERED", "modulename": "sqlglot.tokens", "qualname": "TokenType.ORDERED", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ORDERED: 'ORDERED'>"}, "sqlglot.tokens.TokenType.ORDINALITY": {"fullname": "sqlglot.tokens.TokenType.ORDINALITY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ORDINALITY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ORDINALITY: 'ORDINALITY'>"}, "sqlglot.tokens.TokenType.OUTER": {"fullname": "sqlglot.tokens.TokenType.OUTER", "modulename": "sqlglot.tokens", "qualname": "TokenType.OUTER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OUTER: 'OUTER'>"}, "sqlglot.tokens.TokenType.OVER": {"fullname": "sqlglot.tokens.TokenType.OVER", "modulename": "sqlglot.tokens", "qualname": "TokenType.OVER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OVER: 'OVER'>"}, "sqlglot.tokens.TokenType.OVERLAPS": {"fullname": "sqlglot.tokens.TokenType.OVERLAPS", "modulename": "sqlglot.tokens", "qualname": "TokenType.OVERLAPS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OVERLAPS: 'OVERLAPS'>"}, "sqlglot.tokens.TokenType.OVERWRITE": {"fullname": "sqlglot.tokens.TokenType.OVERWRITE", "modulename": "sqlglot.tokens", "qualname": "TokenType.OVERWRITE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.OVERWRITE: 'OVERWRITE'>"}, "sqlglot.tokens.TokenType.PARTITION": {"fullname": "sqlglot.tokens.TokenType.PARTITION", "modulename": "sqlglot.tokens", "qualname": "TokenType.PARTITION", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PARTITION: 'PARTITION'>"}, "sqlglot.tokens.TokenType.PARTITION_BY": {"fullname": "sqlglot.tokens.TokenType.PARTITION_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.PARTITION_BY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PARTITION_BY: 'PARTITION_BY'>"}, "sqlglot.tokens.TokenType.PERCENT": {"fullname": "sqlglot.tokens.TokenType.PERCENT", "modulename": "sqlglot.tokens", "qualname": "TokenType.PERCENT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PERCENT: 'PERCENT'>"}, "sqlglot.tokens.TokenType.PIVOT": {"fullname": "sqlglot.tokens.TokenType.PIVOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.PIVOT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PIVOT: 'PIVOT'>"}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"fullname": "sqlglot.tokens.TokenType.PLACEHOLDER", "modulename": "sqlglot.tokens", "qualname": "TokenType.PLACEHOLDER", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PLACEHOLDER: 'PLACEHOLDER'>"}, "sqlglot.tokens.TokenType.PRAGMA": {"fullname": "sqlglot.tokens.TokenType.PRAGMA", "modulename": "sqlglot.tokens", "qualname": "TokenType.PRAGMA", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PRAGMA: 'PRAGMA'>"}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"fullname": "sqlglot.tokens.TokenType.PRIMARY_KEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.PRIMARY_KEY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>"}, "sqlglot.tokens.TokenType.PROCEDURE": {"fullname": "sqlglot.tokens.TokenType.PROCEDURE", "modulename": "sqlglot.tokens", "qualname": "TokenType.PROCEDURE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PROCEDURE: 'PROCEDURE'>"}, "sqlglot.tokens.TokenType.PROPERTIES": {"fullname": "sqlglot.tokens.TokenType.PROPERTIES", "modulename": "sqlglot.tokens", "qualname": "TokenType.PROPERTIES", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PROPERTIES: 'PROPERTIES'>"}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"fullname": "sqlglot.tokens.TokenType.PSEUDO_TYPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.PSEUDO_TYPE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>"}, "sqlglot.tokens.TokenType.QUALIFY": {"fullname": "sqlglot.tokens.TokenType.QUALIFY", "modulename": "sqlglot.tokens", "qualname": "TokenType.QUALIFY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.QUALIFY: 'QUALIFY'>"}, "sqlglot.tokens.TokenType.QUOTE": {"fullname": "sqlglot.tokens.TokenType.QUOTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.QUOTE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.QUOTE: 'QUOTE'>"}, "sqlglot.tokens.TokenType.RANGE": {"fullname": "sqlglot.tokens.TokenType.RANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.RANGE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RANGE: 'RANGE'>"}, "sqlglot.tokens.TokenType.RECURSIVE": {"fullname": "sqlglot.tokens.TokenType.RECURSIVE", "modulename": "sqlglot.tokens", "qualname": "TokenType.RECURSIVE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RECURSIVE: 'RECURSIVE'>"}, "sqlglot.tokens.TokenType.REPLACE": {"fullname": "sqlglot.tokens.TokenType.REPLACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.REPLACE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.REPLACE: 'REPLACE'>"}, "sqlglot.tokens.TokenType.RETURNING": {"fullname": "sqlglot.tokens.TokenType.RETURNING", "modulename": "sqlglot.tokens", "qualname": "TokenType.RETURNING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RETURNING: 'RETURNING'>"}, "sqlglot.tokens.TokenType.REFERENCES": {"fullname": "sqlglot.tokens.TokenType.REFERENCES", "modulename": "sqlglot.tokens", "qualname": "TokenType.REFERENCES", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.REFERENCES: 'REFERENCES'>"}, "sqlglot.tokens.TokenType.RIGHT": {"fullname": "sqlglot.tokens.TokenType.RIGHT", "modulename": "sqlglot.tokens", "qualname": "TokenType.RIGHT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RIGHT: 'RIGHT'>"}, "sqlglot.tokens.TokenType.RLIKE": {"fullname": "sqlglot.tokens.TokenType.RLIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.RLIKE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.RLIKE: 'RLIKE'>"}, "sqlglot.tokens.TokenType.ROLLBACK": {"fullname": "sqlglot.tokens.TokenType.ROLLBACK", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROLLBACK", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ROLLBACK: 'ROLLBACK'>"}, "sqlglot.tokens.TokenType.ROLLUP": {"fullname": "sqlglot.tokens.TokenType.ROLLUP", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROLLUP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ROLLUP: 'ROLLUP'>"}, "sqlglot.tokens.TokenType.ROW": {"fullname": "sqlglot.tokens.TokenType.ROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ROW: 'ROW'>"}, "sqlglot.tokens.TokenType.ROWS": {"fullname": "sqlglot.tokens.TokenType.ROWS", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROWS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.ROWS: 'ROWS'>"}, "sqlglot.tokens.TokenType.SELECT": {"fullname": "sqlglot.tokens.TokenType.SELECT", "modulename": "sqlglot.tokens", "qualname": "TokenType.SELECT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SELECT: 'SELECT'>"}, "sqlglot.tokens.TokenType.SEMI": {"fullname": "sqlglot.tokens.TokenType.SEMI", "modulename": "sqlglot.tokens", "qualname": "TokenType.SEMI", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SEMI: 'SEMI'>"}, "sqlglot.tokens.TokenType.SEPARATOR": {"fullname": "sqlglot.tokens.TokenType.SEPARATOR", "modulename": "sqlglot.tokens", "qualname": "TokenType.SEPARATOR", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SEPARATOR: 'SEPARATOR'>"}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"fullname": "sqlglot.tokens.TokenType.SERDE_PROPERTIES", "modulename": "sqlglot.tokens", "qualname": "TokenType.SERDE_PROPERTIES", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>"}, "sqlglot.tokens.TokenType.SET": {"fullname": "sqlglot.tokens.TokenType.SET", "modulename": "sqlglot.tokens", "qualname": "TokenType.SET", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SET: 'SET'>"}, "sqlglot.tokens.TokenType.SETTINGS": {"fullname": "sqlglot.tokens.TokenType.SETTINGS", "modulename": "sqlglot.tokens", "qualname": "TokenType.SETTINGS", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SETTINGS: 'SETTINGS'>"}, "sqlglot.tokens.TokenType.SHOW": {"fullname": "sqlglot.tokens.TokenType.SHOW", "modulename": "sqlglot.tokens", "qualname": "TokenType.SHOW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SHOW: 'SHOW'>"}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"fullname": "sqlglot.tokens.TokenType.SIMILAR_TO", "modulename": "sqlglot.tokens", "qualname": "TokenType.SIMILAR_TO", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SIMILAR_TO: 'SIMILAR_TO'>"}, "sqlglot.tokens.TokenType.SOME": {"fullname": "sqlglot.tokens.TokenType.SOME", "modulename": "sqlglot.tokens", "qualname": "TokenType.SOME", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.SOME: 'SOME'>"}, "sqlglot.tokens.TokenType.STRUCT": {"fullname": "sqlglot.tokens.TokenType.STRUCT", "modulename": "sqlglot.tokens", "qualname": "TokenType.STRUCT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.STRUCT: 'STRUCT'>"}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"fullname": "sqlglot.tokens.TokenType.TABLE_SAMPLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TABLE_SAMPLE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>"}, "sqlglot.tokens.TokenType.TEMPORARY": {"fullname": "sqlglot.tokens.TokenType.TEMPORARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.TEMPORARY", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TEMPORARY: 'TEMPORARY'>"}, "sqlglot.tokens.TokenType.TOP": {"fullname": "sqlglot.tokens.TokenType.TOP", "modulename": "sqlglot.tokens", "qualname": "TokenType.TOP", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TOP: 'TOP'>"}, "sqlglot.tokens.TokenType.THEN": {"fullname": "sqlglot.tokens.TokenType.THEN", "modulename": "sqlglot.tokens", "qualname": "TokenType.THEN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.THEN: 'THEN'>"}, "sqlglot.tokens.TokenType.TRUE": {"fullname": "sqlglot.tokens.TokenType.TRUE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TRUE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.TRUE: 'TRUE'>"}, "sqlglot.tokens.TokenType.UNCACHE": {"fullname": "sqlglot.tokens.TokenType.UNCACHE", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNCACHE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNCACHE: 'UNCACHE'>"}, "sqlglot.tokens.TokenType.UNION": {"fullname": "sqlglot.tokens.TokenType.UNION", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNION", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNION: 'UNION'>"}, "sqlglot.tokens.TokenType.UNNEST": {"fullname": "sqlglot.tokens.TokenType.UNNEST", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNNEST", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNNEST: 'UNNEST'>"}, "sqlglot.tokens.TokenType.UNPIVOT": {"fullname": "sqlglot.tokens.TokenType.UNPIVOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNPIVOT", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNPIVOT: 'UNPIVOT'>"}, "sqlglot.tokens.TokenType.UPDATE": {"fullname": "sqlglot.tokens.TokenType.UPDATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.UPDATE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UPDATE: 'UPDATE'>"}, "sqlglot.tokens.TokenType.USE": {"fullname": "sqlglot.tokens.TokenType.USE", "modulename": "sqlglot.tokens", "qualname": "TokenType.USE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.USE: 'USE'>"}, "sqlglot.tokens.TokenType.USING": {"fullname": "sqlglot.tokens.TokenType.USING", "modulename": "sqlglot.tokens", "qualname": "TokenType.USING", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.USING: 'USING'>"}, "sqlglot.tokens.TokenType.VALUES": {"fullname": "sqlglot.tokens.TokenType.VALUES", "modulename": "sqlglot.tokens", "qualname": "TokenType.VALUES", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VALUES: 'VALUES'>"}, "sqlglot.tokens.TokenType.VIEW": {"fullname": "sqlglot.tokens.TokenType.VIEW", "modulename": "sqlglot.tokens", "qualname": "TokenType.VIEW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VIEW: 'VIEW'>"}, "sqlglot.tokens.TokenType.VOLATILE": {"fullname": "sqlglot.tokens.TokenType.VOLATILE", "modulename": "sqlglot.tokens", "qualname": "TokenType.VOLATILE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.VOLATILE: 'VOLATILE'>"}, "sqlglot.tokens.TokenType.WHEN": {"fullname": "sqlglot.tokens.TokenType.WHEN", "modulename": "sqlglot.tokens", "qualname": "TokenType.WHEN", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.WHEN: 'WHEN'>"}, "sqlglot.tokens.TokenType.WHERE": {"fullname": "sqlglot.tokens.TokenType.WHERE", "modulename": "sqlglot.tokens", "qualname": "TokenType.WHERE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.WHERE: 'WHERE'>"}, "sqlglot.tokens.TokenType.WINDOW": {"fullname": "sqlglot.tokens.TokenType.WINDOW", "modulename": "sqlglot.tokens", "qualname": "TokenType.WINDOW", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.WINDOW: 'WINDOW'>"}, "sqlglot.tokens.TokenType.WITH": {"fullname": "sqlglot.tokens.TokenType.WITH", "modulename": "sqlglot.tokens", "qualname": "TokenType.WITH", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.WITH: 'WITH'>"}, "sqlglot.tokens.TokenType.UNIQUE": {"fullname": "sqlglot.tokens.TokenType.UNIQUE", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNIQUE", "kind": "variable", "doc": "

\n", "default_value": "<TokenType.UNIQUE: 'UNIQUE'>"}, "sqlglot.tokens.Token": {"fullname": "sqlglot.tokens.Token", "modulename": "sqlglot.tokens", "qualname": "Token", "kind": "class", "doc": "

\n"}, "sqlglot.tokens.Token.__init__": {"fullname": "sqlglot.tokens.Token.__init__", "modulename": "sqlglot.tokens", "qualname": "Token.__init__", "kind": "function", "doc": "

Token initializer.

\n\n
Arguments:
\n\n
    \n
  • token_type: The TokenType Enum.
  • \n
  • text: The text of the token.
  • \n
  • line: The line that the token ends on.
  • \n
  • col: The column that the token ends on.
  • \n
  • start: The start index of the token.
  • \n
  • end: The ending index of the token.
  • \n
  • comments: The comments to attach to the token.
  • \n
\n", "signature": "(\ttoken_type: sqlglot.tokens.TokenType,\ttext: str,\tline: int = 1,\tcol: int = 1,\tstart: int = 0,\tend: int = 0,\tcomments: List[str] = [])"}, "sqlglot.tokens.Token.number": {"fullname": "sqlglot.tokens.Token.number", "modulename": "sqlglot.tokens", "qualname": "Token.number", "kind": "function", "doc": "

Returns a NUMBER token with number as its text.

\n", "signature": "(cls, number: int) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Token.string": {"fullname": "sqlglot.tokens.Token.string", "modulename": "sqlglot.tokens", "qualname": "Token.string", "kind": "function", "doc": "

Returns a STRING token with string as its text.

\n", "signature": "(cls, string: str) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Token.identifier": {"fullname": "sqlglot.tokens.Token.identifier", "modulename": "sqlglot.tokens", "qualname": "Token.identifier", "kind": "function", "doc": "

Returns an IDENTIFIER token with identifier as its text.

\n", "signature": "(cls, identifier: str) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Token.var": {"fullname": "sqlglot.tokens.Token.var", "modulename": "sqlglot.tokens", "qualname": "Token.var", "kind": "function", "doc": "

Returns a VAR token with var as its text.

\n", "signature": "(cls, var: str) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Tokenizer": {"fullname": "sqlglot.tokens.Tokenizer", "modulename": "sqlglot.tokens", "qualname": "Tokenizer", "kind": "class", "doc": "

\n"}, "sqlglot.tokens.Tokenizer.reset": {"fullname": "sqlglot.tokens.Tokenizer.reset", "modulename": "sqlglot.tokens", "qualname": "Tokenizer.reset", "kind": "function", "doc": "

\n", "signature": "(self) -> None:", "funcdef": "def"}, "sqlglot.tokens.Tokenizer.tokenize": {"fullname": "sqlglot.tokens.Tokenizer.tokenize", "modulename": "sqlglot.tokens", "qualname": "Tokenizer.tokenize", "kind": "function", "doc": "

Returns a list of tokens corresponding to the SQL string sql.

\n", "signature": "(self, sql: str) -> List[sqlglot.tokens.Token]:", "funcdef": "def"}, "sqlglot.tokens.Tokenizer.peek": {"fullname": "sqlglot.tokens.Tokenizer.peek", "modulename": "sqlglot.tokens", "qualname": "Tokenizer.peek", "kind": "function", "doc": "

\n", "signature": "(self, i: int = 0) -> str:", "funcdef": "def"}, "sqlglot.transforms": {"fullname": "sqlglot.transforms", "modulename": "sqlglot.transforms", "kind": "module", "doc": "

\n"}, "sqlglot.transforms.unalias_group": {"fullname": "sqlglot.transforms.unalias_group", "modulename": "sqlglot.transforms", "qualname": "unalias_group", "kind": "function", "doc": "

Replace references to select aliases in GROUP BY clauses.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT a AS b FROM x GROUP BY b").transform(unalias_group).sql()\n'SELECT a AS b FROM x GROUP BY 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the expression that will be transformed.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.eliminate_distinct_on": {"fullname": "sqlglot.transforms.eliminate_distinct_on", "modulename": "sqlglot.transforms", "qualname": "eliminate_distinct_on", "kind": "function", "doc": "

Convert SELECT DISTINCT ON statements to a subquery with a window function.

\n\n

This is useful for dialects that don't support SELECT DISTINCT ON but support window functions.

\n\n
Arguments:
\n\n
    \n
  • expression: the expression that will be transformed.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.eliminate_qualify": {"fullname": "sqlglot.transforms.eliminate_qualify", "modulename": "sqlglot.transforms", "qualname": "eliminate_qualify", "kind": "function", "doc": "

Convert SELECT statements that contain the QUALIFY clause into subqueries, filtered equivalently.

\n\n

The idea behind this transformation can be seen in Snowflake's documentation for QUALIFY:\nhttps://docs.snowflake.com/en/sql-reference/constructs/qualify

\n\n

Some dialects don't support window functions in the WHERE clause, so we need to include them as\nprojections in the subquery, in order to refer to them in the outer filter using aliases. Also,\nif a column is referenced in the QUALIFY clause but is not selected, we need to include it too,\notherwise we won't be able to refer to it in the outer query's WHERE clause.

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.remove_precision_parameterized_types": {"fullname": "sqlglot.transforms.remove_precision_parameterized_types", "modulename": "sqlglot.transforms", "qualname": "remove_precision_parameterized_types", "kind": "function", "doc": "

Some dialects only allow the precision for parameterized types to be defined in the DDL and not in\nother expressions. This transform removes the precision from parameterized types in expressions.

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.unnest_to_explode": {"fullname": "sqlglot.transforms.unnest_to_explode", "modulename": "sqlglot.transforms", "qualname": "unnest_to_explode", "kind": "function", "doc": "

Convert cross join unnest into lateral view explode (used in presto -> hive).

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.explode_to_unnest": {"fullname": "sqlglot.transforms.explode_to_unnest", "modulename": "sqlglot.transforms", "qualname": "explode_to_unnest", "kind": "function", "doc": "

Convert explode/posexplode into unnest (used in hive -> presto).

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.remove_target_from_merge": {"fullname": "sqlglot.transforms.remove_target_from_merge", "modulename": "sqlglot.transforms", "qualname": "remove_target_from_merge", "kind": "function", "doc": "

Remove table refs from columns in when statements.

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.remove_within_group_for_percentiles": {"fullname": "sqlglot.transforms.remove_within_group_for_percentiles", "modulename": "sqlglot.transforms", "qualname": "remove_within_group_for_percentiles", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.add_recursive_cte_column_names": {"fullname": "sqlglot.transforms.add_recursive_cte_column_names", "modulename": "sqlglot.transforms", "qualname": "add_recursive_cte_column_names", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.epoch_cast_to_ts": {"fullname": "sqlglot.transforms.epoch_cast_to_ts", "modulename": "sqlglot.transforms", "qualname": "epoch_cast_to_ts", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.preprocess": {"fullname": "sqlglot.transforms.preprocess", "modulename": "sqlglot.transforms", "qualname": "preprocess", "kind": "function", "doc": "

Creates a new transform by chaining a sequence of transformations and converts the resulting\nexpression to SQL, using either the \"_sql\" method corresponding to the resulting expression,\nor the appropriate Generator.TRANSFORMS function (when applicable -- see below).

\n\n
Arguments:
\n\n
    \n
  • transforms: sequence of transform functions. These will be called in order.
  • \n
\n\n
Returns:
\n\n
\n

Function that can be used as a generator transform.

\n
\n", "signature": "(\ttransforms: List[Callable[[sqlglot.expressions.Expression], sqlglot.expressions.Expression]]) -> Callable[[sqlglot.generator.Generator, sqlglot.expressions.Expression], str]:", "funcdef": "def"}, "sqlglot.trie": {"fullname": "sqlglot.trie", "modulename": "sqlglot.trie", "kind": "module", "doc": "

\n"}, "sqlglot.trie.new_trie": {"fullname": "sqlglot.trie.new_trie", "modulename": "sqlglot.trie", "qualname": "new_trie", "kind": "function", "doc": "

Creates a new trie out of a collection of keywords.

\n\n

The trie is represented as a sequence of nested dictionaries keyed by either single character\nstrings, or by 0, which is used to designate that a keyword is in the trie.

\n\n
Example:
\n\n
\n
\n
>>> new_trie(["bla", "foo", "blab"])\n{'b': {'l': {'a': {0: True, 'b': {0: True}}}}, 'f': {'o': {'o': {0: True}}}}\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • keywords: the keywords to create the trie from.
  • \n
  • trie: a trie to mutate instead of creating a new one
  • \n
\n\n
Returns:
\n\n
\n

The trie corresponding to keywords.

\n
\n", "signature": "(\tkeywords: Iterable[Sequence[Hashable]],\ttrie: Optional[Dict] = None) -> Dict:", "funcdef": "def"}, "sqlglot.trie.in_trie": {"fullname": "sqlglot.trie.in_trie", "modulename": "sqlglot.trie", "qualname": "in_trie", "kind": "function", "doc": "

Checks whether a key is in a trie.

\n\n
Examples:
\n\n
\n
\n
>>> in_trie(new_trie(["cat"]), "bob")\n(0, {'c': {'a': {'t': {0: True}}}})\n
\n
\n \n
\n
>>> in_trie(new_trie(["cat"]), "ca")\n(1, {'t': {0: True}})\n
\n
\n \n
\n
>>> in_trie(new_trie(["cat"]), "cat")\n(2, {0: True})\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • trie: the trie to be searched.
  • \n
  • key: the target key.
  • \n
\n\n
Returns:
\n\n
\n

A pair (value, subtrie), where subtrie is the sub-trie we get at the point where the search stops, and value\n is either 0 (search was unsuccessful), 1 (key is a prefix of a keyword in trie) or 2 (key is in trie).

\n
\n", "signature": "(trie: Dict, key: Sequence[Hashable]) -> Tuple[int, Dict]:", "funcdef": "def"}}, "docInfo": {"sqlglot": {"qualname": 0, "fullname": 1, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 5893}, "sqlglot.pretty": {"qualname": 1, "fullname": 2, "annotation": 0, "default_value": 1, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.schema": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.parse": {"qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 124, "bases": 0, "doc": 84}, "sqlglot.parse_one": {"qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 198, "bases": 0, "doc": 99}, "sqlglot.transpile": {"qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 239, "bases": 0, "doc": 177}, "sqlglot.dataframe": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3543}, "sqlglot.dataframe.sql": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession.table": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 208, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 259, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.select": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.alias": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.where": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 86, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.filter": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 86, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.agg": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.join": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 180, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 120, "bases": 0, "doc": 44}, 
"sqlglot.dataframe.sql.DataFrame.sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 120, "bases": 0, "doc": 44}, "sqlglot.dataframe.sql.DataFrame.union": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.intersect": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.distinct": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 38, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.dropna": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 138, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.fillna": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 123, "bases": 0, "doc": 100}, "sqlglot.dataframe.sql.DataFrame.replace": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 217, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.drop": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 80, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.limit": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.hint": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.repartition": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 111, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.cache": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.persist": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 60, "bases": 0, "doc": 20}, "sqlglot.dataframe.sql.GroupedData": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.agg": 
{"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.count": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.mean": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.avg": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.max": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.min": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.sum": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.pivot": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 63, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ensure_col": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 71, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ensure_cols": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 98, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 123, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 92, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.binary_op": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 85, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 85, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.unary_op": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ensure_literal": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.set_table_name": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.alias": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.asc": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.desc": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, 
"bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.when": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.otherwise": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.isNull": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.isNotNull": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.cast": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 53, "bases": 0, "doc": 27}, "sqlglot.dataframe.sql.Column.startswith": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 78, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.endswith": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 78, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.rlike": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.like": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ilike": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.substr": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 121, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.isin": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 81, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.between": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 97, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.over": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 138, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 143, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 177, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window": {"qualname": 1, 
"fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.partitionBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.orderBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.rowsBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.rangeBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 38, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameReader": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameReader.table": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 122, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 52, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 70, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 71, "bases": 0, "doc": 3}, "sqlglot.dialects": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 0, "doc": 764}, "sqlglot.dialects.bigquery": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"qualname": 4, "fullname": 7, "annotation": 0, 
"default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 88, "bases": 0, "doc": 3}, "sqlglot.dialects.databricks": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.databricks.Databricks": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.databricks.Databricks.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 97}, "sqlglot.dialects.databricks.Databricks.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 311}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.dialect": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 5}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 8, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.DRILL": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.HIVE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"qualname": 2, "fullname": 5, "annotation": 0, 
"default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SPARK": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TRINO": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TSQL": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 105, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.format_time": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 70, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.parse": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.parse_into": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 126, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.generate": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 50, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.transpile": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 37, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.tokenize": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.rename_func": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.if_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 67, 
"bases": 0, "doc": 3}, "sqlglot.dialects.dialect.inline_array_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_ilike_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_tablesample_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_pivot_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_trycast_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_properties_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.str_position_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.struct_extract_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.var_map_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 91, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.format_time_lambda": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 71}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 46}, "sqlglot.dialects.dialect.parse_date_delta": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 78, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.date_trunc_to_time": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.locate_to_strposition": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.left_to_substring_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.right_to_substring_sql": {"qualname": 4, "fullname": 7, 
"annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.timestrtotime_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.datestrtodate_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.min_or_least": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.max_or_greatest": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.count_if_to_sum": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.trim_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.str_to_time_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"qualname": 6, "fullname": 9, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.pivot_column_names": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 111, "bases": 0, "doc": 3}, "sqlglot.dialects.drill": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.drill.Drill": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.drill.Drill.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.drill.Drill.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.drill.Drill.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.dialects.duckdb": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 81, "bases": 0, "doc": 3}, 
"sqlglot.dialects.hive": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.hive.Hive.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.hive.Hive.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.hive.Hive.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.dialects.mysql": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.mysql.MySQL": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.mysql.MySQL.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.mysql.MySQL.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.oracle.Oracle.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.postgres": {"qualname": 0, "fullname": 3, "annotation": 0, 
"default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.postgres.Postgres": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.postgres.Postgres.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.postgres.Postgres.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.presto": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.presto.Presto": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.presto.Presto.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.presto.Presto.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.presto.Presto.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 102, "bases": 0, "doc": 3}, "sqlglot.dialects.redshift": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.redshift.Redshift": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.redshift.Redshift.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 97}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.redshift.Redshift.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 311}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 58}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 19}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 75}, "sqlglot.dialects.snowflake": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake": {"qualname": 1, "fullname": 4, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 36, "bases": 0, "doc": 3}, "sqlglot.dialects.spark": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.spark.Spark": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.spark.Spark.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 97}, "sqlglot.dialects.spark.Spark.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 311}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.spark2": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.spark2.Spark2": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.spark2.Spark2.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 97}, "sqlglot.dialects.spark2.Spark2.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 311}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.sqlite": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"qualname": 2, 
"fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.sqlite.SQLite.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.starrocks": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.starrocks.StarRocks": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 97}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 311}, "sqlglot.dialects.tableau": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.tableau.Tableau": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.tableau.Tableau.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.tableau.Tableau.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.teradata": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.teradata.Teradata.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": 
{"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 88, "bases": 0, "doc": 3}, "sqlglot.dialects.trino": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.trino.Trino": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.trino.Trino.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 311}, "sqlglot.dialects.trino.Trino.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.tsql": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"qualname": 6, "fullname": 9, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.TSQL": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 97}, "sqlglot.dialects.tsql.TSQL.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.diff": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 6444}, "sqlglot.diff.Insert": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.diff.Insert.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.diff.Remove": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.diff.Remove.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, 
"sqlglot.diff.Move": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 14}, "sqlglot.diff.Move.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.diff.Update": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.diff.Update.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.diff.Keep": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.diff.Keep.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.diff.diff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 206, "bases": 0, "doc": 306}, "sqlglot.diff.ChangeDistiller": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 46}, "sqlglot.diff.ChangeDistiller.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.diff.ChangeDistiller.diff": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 199, "bases": 0, "doc": 3}, "sqlglot.errors": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.errors.ErrorLevel": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.errors.ErrorLevel.IGNORE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 6}, "sqlglot.errors.ErrorLevel.WARN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 6}, "sqlglot.errors.ErrorLevel.RAISE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 12}, "sqlglot.errors.SqlglotError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 11}, "sqlglot.errors.UnsupportedError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.ParseError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.ParseError.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.errors.ParseError.new": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 207, "bases": 0, "doc": 3}, "sqlglot.errors.TokenError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.OptimizeError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.SchemaError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.ExecuteError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.concat_messages": {"qualname": 2, "fullname": 4, "annotation": 0, 
"default_value": 0, "signature": 35, "bases": 0, "doc": 3}, "sqlglot.errors.merge_errors": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.executor": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 2950}, "sqlglot.executor.execute": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 197, "bases": 0, "doc": 115}, "sqlglot.executor.context": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 64}, "sqlglot.executor.context.Context.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 21}, "sqlglot.executor.context.Context.eval": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.eval_tuple": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.add_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.table_iter": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 72, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.filter": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.set_row": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.set_index": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.set_range": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.executor.env": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.env.reverse_key": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.env.reverse_key.__init__": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 9, "bases": 0, "doc": 3}, "sqlglot.executor.env.filter_nulls": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.executor.env.null_if_any": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 13, "bases": 0, "doc": 59}, "sqlglot.executor.env.str_position": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 3}, "sqlglot.executor.env.substring": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.executor.env.cast": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.env.ordered": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 
3}, "sqlglot.executor.env.interval": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.execute": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.generate": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 16}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 15}, "sqlglot.executor.python.PythonExecutor.context": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.table": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.scan": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.static": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.scan_table": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.join": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.hash_join": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 28, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.aggregate": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.set_operation": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.Python": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.executor.python.Python.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.executor.python.Python.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 311}, "sqlglot.executor.table": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 
3}, "sqlglot.executor.table.Table": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.add_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.append": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.pop": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.executor.table.TableIter": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.TableIter.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 9, "bases": 0, "doc": 3}, "sqlglot.executor.table.RangeReader": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.RangeReader.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 9, "bases": 0, "doc": 3}, "sqlglot.executor.table.RowReader": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.RowReader.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 3}, "sqlglot.executor.table.Tables": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 6, "doc": 87}, "sqlglot.executor.table.ensure_tables": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.expressions": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 61}, "sqlglot.expressions.Expression": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 346}, "sqlglot.expressions.Expression.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.expressions.Expression.this": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.expression": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.expressions": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.text": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 32}, "sqlglot.expressions.Expression.is_string": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.is_number": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.is_int": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.is_star": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, 
"sqlglot.expressions.Expression.alias": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 18}, "sqlglot.expressions.Expression.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Expression.copy": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 10}, "sqlglot.expressions.Expression.add_comments": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.expressions.Expression.append": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 54}, "sqlglot.expressions.Expression.set": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 45}, "sqlglot.expressions.Expression.depth": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.iter_expressions": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 14}, "sqlglot.expressions.Expression.find": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 60, "bases": 0, "doc": 83}, "sqlglot.expressions.Expression.find_all": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 60, "bases": 0, "doc": 81}, "sqlglot.expressions.Expression.find_ancestor": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 43}, "sqlglot.expressions.Expression.parent_select": {"qualname": 3, "fullname": 5, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 8}, "sqlglot.expressions.Expression.same_parent": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 13}, "sqlglot.expressions.Expression.root": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 10}, "sqlglot.expressions.Expression.walk": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 89}, "sqlglot.expressions.Expression.dfs": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 33}, "sqlglot.expressions.Expression.bfs": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 33}, "sqlglot.expressions.Expression.unnest": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.unalias": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 12}, "sqlglot.expressions.Expression.unnest_operands": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.flatten": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 28}, "sqlglot.expressions.Expression.sql": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 99, "bases": 0, "doc": 61}, "sqlglot.expressions.Expression.transform": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 111}, "sqlglot.expressions.Expression.replace": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 81}, "sqlglot.expressions.Expression.pop": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 22}, "sqlglot.expressions.Expression.assert_is": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 162}, "sqlglot.expressions.Expression.error_messages": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 79}, "sqlglot.expressions.Expression.dump": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.load": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 17}, "sqlglot.expressions.Condition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Condition.and_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 168, "bases": 0, "doc": 183}, "sqlglot.expressions.Condition.or_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 168, "bases": 0, "doc": 183}, "sqlglot.expressions.Condition.not_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 28, "bases": 0, "doc": 108}, "sqlglot.expressions.Condition.as_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 178, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.isin": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 110, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.between": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.is_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.like": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.ilike": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.eq": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.neq": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.expressions.Condition.rlike": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.expressions.Predicate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 13}, "sqlglot.expressions.DerivedTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unionable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unionable.union": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 201}, "sqlglot.expressions.Unionable.intersect": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 201}, "sqlglot.expressions.Unionable.except_": {"qualname": 3, "fullname": 5, "annotation": 0, 
"default_value": 0, "signature": 160, "bases": 0, "doc": 202}, "sqlglot.expressions.UDTF": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Cache": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Uncache": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Create": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Clone": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Describe": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Pragma": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Set": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SetItem": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Show": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UserDefinedFunction": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CharacterSet": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.With": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WithinGroup": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CTE": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TableAlias": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.HexString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ByteString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RawString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Column": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Column.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Column.parts": {"qualname": 2, "fullname": 4, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 15}, "sqlglot.expressions.Column.to_dot": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 10}, "sqlglot.expressions.ColumnPosition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 
3}, "sqlglot.expressions.ColumnDef": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AlterColumn": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RenameTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SetTag": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Comment": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MergeTreeTTLAction": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MergeTreeTTL": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ColumnConstraintKind": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CharacterSetColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CheckColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CollateColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CommentColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CompressColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateFormatColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DefaultColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.EncodeColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.InlineLengthColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NotNullColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.OnUpdateColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"qualname": 1, 
"fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TitleColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UniqueColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UppercaseColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PathColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Constraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Delete": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Delete.delete": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 150}, "sqlglot.expressions.Delete.where": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 226}, "sqlglot.expressions.Delete.returning": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 194}, "sqlglot.expressions.Drop": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Filter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Check": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Directory": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ForeignKey": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PrimaryKey": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Into": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.From": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Having": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Hint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JoinHint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Identifier": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Identifier.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Index": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Insert": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Insert.with_": 
{"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 236, "bases": 0, "doc": 291}, "sqlglot.expressions.OnConflict": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Returning": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Introducer": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.National": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LoadData": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Partition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Fetch": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Group": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Lambda": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Limit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Literal": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Literal.number": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.expressions.Literal.string": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.expressions.Literal.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Join": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Join.on": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 239}, "sqlglot.expressions.Join.using": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 241}, "sqlglot.expressions.Lateral": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MatchRecognize": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Final": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Offset": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Order": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Cluster": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Distribute": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sort": {"qualname": 1, "fullname": 3, "annotation": 0, 
"default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Ordered": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Property": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AlgorithmProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AutoIncrementProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BlockCompressionProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CharacterSetProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ChecksumProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CollateProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataBlocksizeProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DefinerProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DistKeyProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DistStyleProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.EngineProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ToTableProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ExecuteAsProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ExternalProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FallbackProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FileFormatProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FreespaceProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.InputOutputFormat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.IsolatedLoadingProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JournalProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LanguageProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DictProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 
0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DictSubProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DictRange": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.OnCluster": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LikeProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LocationProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LockingProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LogProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MaterializedProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MergeBlockRatioProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NoPrimaryIndexProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.OnCommitProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PartitionedByProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ReturnsProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowFormatProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowFormatDelimitedProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowFormatSerdeProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SchemaCommentProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SerdeProperties": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SetProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SettingsProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SortKeyProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SqlSecurityProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StabilityProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TemporaryProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TransientProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.VolatileProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WithDataProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WithJournalTableProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Properties": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Properties.Location": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_NAME": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_WITH": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.from_dict": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 3}, "sqlglot.expressions.Qualify": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Return": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Reference": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Tuple": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Tuple.isin": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 110, "bases": 0, "doc": 3}, "sqlglot.expressions.Subqueryable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Subqueryable.subquery": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 89, "bases": 0, "doc": 213}, "sqlglot.expressions.Subqueryable.limit": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 166, "bases": 0, "doc": 3}, 
"sqlglot.expressions.Subqueryable.with_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 236, "bases": 0, "doc": 301}, "sqlglot.expressions.Table": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Table.parts": {"qualname": 2, "fullname": 4, "annotation": 4, "default_value": 0, "signature": 0, "bases": 0, "doc": 14}, "sqlglot.expressions.SystemTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Union": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Union.limit": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 166, "bases": 0, "doc": 221}, "sqlglot.expressions.Union.select": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 245}, "sqlglot.expressions.Union.is_star": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.expressions.Except": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Intersect": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unnest": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Update": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Values": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Var": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Schema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Lock": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Select": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Select.from_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 160, "bases": 0, "doc": 199}, "sqlglot.expressions.Select.group_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 275}, "sqlglot.expressions.Select.order_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 250}, "sqlglot.expressions.Select.sort_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 264}, "sqlglot.expressions.Select.cluster_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 264}, "sqlglot.expressions.Select.limit": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 166, "bases": 0, "doc": 219}, "sqlglot.expressions.Select.offset": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 166, "bases": 0, "doc": 219}, "sqlglot.expressions.Select.select": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 
0, "doc": 196}, "sqlglot.expressions.Select.lateral": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 235}, "sqlglot.expressions.Select.join": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 372, "bases": 0, "doc": 621}, "sqlglot.expressions.Select.where": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 253}, "sqlglot.expressions.Select.having": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 275}, "sqlglot.expressions.Select.window": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 3}, "sqlglot.expressions.Select.qualify": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 186, "bases": 0, "doc": 3}, "sqlglot.expressions.Select.distinct": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 102, "bases": 0, "doc": 157}, "sqlglot.expressions.Select.ctas": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 184, "bases": 0, "doc": 223}, "sqlglot.expressions.Select.lock": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 331}, "sqlglot.expressions.Select.hint": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 154, "bases": 0, "doc": 205}, "sqlglot.expressions.Select.is_star": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.expressions.Subquery": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Subquery.unnest": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 8}, "sqlglot.expressions.Subquery.is_star": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.expressions.Subquery.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.TableSample": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Tag": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 15}, "sqlglot.expressions.Pivot": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Window": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WindowSpec": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Where": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Star": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Star.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Parameter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SessionParameter": {"qualname": 1, "fullname": 3, "annotation": 
0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Placeholder": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Null": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Boolean": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataTypeSize": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataType.Type": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.expressions.DataType.Type.ARRAY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BIGINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BINARY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BIT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.CHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATETIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATETIME64": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.ENUM": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": 
{"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TSRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATERANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DECIMAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DOUBLE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.FLOAT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.HSTORE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.IMAGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT128": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT256": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INTERVAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.JSON": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.JSONB": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"qualname": 3, "fullname": 5, "annotation": 0, 
"default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MAP": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MONEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NCHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NULL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NULLABLE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.OBJECT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SERIAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SMALLINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.STRUCT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SUPER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TEXT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TINYINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 
3}, "sqlglot.expressions.DataType.Type.UBIGINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.USMALLINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UTINYINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UINT128": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UINT256": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UUID": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.VARBINARY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.VARCHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.VARIANT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.XML": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.build": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 157, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.is_type": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 65, "bases": 0, "doc": 3}, "sqlglot.expressions.PseudoType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SubqueryPredicate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.All": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Any": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Exists": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Command": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Transaction": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Commit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Rollback": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, 
"sqlglot.expressions.AlterTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AddConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DropPartition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Binary": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Add": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Connector": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.And": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Or": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseAnd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseLeftShift": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseOr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseRightShift": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseXor": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Div": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Overlaps": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Dot": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Dot.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Dot.build": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 52, "bases": 0, "doc": 12}, "sqlglot.expressions.DPipe": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SafeDPipe": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.EQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.NullSafeEQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.NullSafeNEQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Distance": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Escape": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Glob": {"qualname": 1, "fullname": 3, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.GT": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.GTE": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.ILike": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.ILikeAny": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.IntDiv": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Is": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Kwarg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 12}, "sqlglot.expressions.Like": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.LikeAny": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.LT": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.LTE": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Mod": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Mul": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NEQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.SimilarTo": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Slice": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayOverlaps": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unary": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseNot": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Not": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Paren": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Paren.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Neg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Alias": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Alias.output_name": {"qualname": 3, 
"fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Aliases": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AtTimeZone": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Between": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Bracket": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Distinct": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.In": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeUnit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 10}, "sqlglot.expressions.TimeUnit.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.Interval": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.IgnoreNulls": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RespectNulls": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Func": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 128}, "sqlglot.expressions.Func.from_arg_list": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.expressions.Func.sql_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.Func.sql_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.Func.default_parser_mappings": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.AggFunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ParameterizedAgg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Abs": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Anonymous": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Hll": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ApproxDistinct": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Array": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ToChar": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.GenerateSeries": {"qualname": 1, "fullname": 3, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayAgg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayAll": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayAny": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayConcat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayContains": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.ArrayContained": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayFilter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayJoin": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArraySize": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArraySort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArraySum": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayUnionAgg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Avg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AnyValue": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Case": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Case.when": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 117, "bases": 0, "doc": 3}, "sqlglot.expressions.Case.else_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 84, "bases": 0, "doc": 3}, "sqlglot.expressions.Cast": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Cast.output_name": {"qualname": 3, "fullname": 5, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Cast.is_type": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 65, "bases": 0, "doc": 3}, "sqlglot.expressions.CastToStrType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Collate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TryCast": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Ceil": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Coalesce": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Concat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SafeConcat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ConcatWs": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Count": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CountIf": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentDatetime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentTimestamp": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentUser": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DatetimeAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DatetimeSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DatetimeDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DatetimeTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DayOfWeek": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DayOfMonth": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DayOfYear": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WeekOfYear": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LastDateOfMonth": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Extract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimestampAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, 
"doc": 3}, "sqlglot.expressions.TimestampSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimestampDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimestampTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateFromParts": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateStrToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateToDateStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateToDi": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Date": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Day": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Decode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DiToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Encode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Exp": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Explode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Floor": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FromBase64": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ToBase64": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Greatest": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.GroupConcat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Hex": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.If": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Initcap": {"qualname": 1, "fullname": 3, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONKeyValue": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONObject": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.OpenJSONColumnDef": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.OpenJSON": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONBContains": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.JSONExtractScalar": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONBExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONBExtractScalar": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONFormat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Least": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Left": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Right": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Length": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Levenshtein": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Ln": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Log": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Log2": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Log10": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LogicalOr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LogicalAnd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Lower": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Map": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StarMap": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.VarMap": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, 
"doc": 3}, "sqlglot.expressions.MatchAgainst": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Max": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MD5": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Min": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Month": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Nvl2": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Posexplode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Pow": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.PercentileCont": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PercentileDisc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Quantile": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ApproxQuantile": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RangeN": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ReadCSV": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Reduce": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpLike": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpILike": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpSplit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Repeat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Round": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowNumber": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SafeDivide": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SetAgg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SHA": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SHA2": {"qualname": 1, "fullname": 3, "annotation": 
0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SortArray": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Split": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Substring": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StandardHash": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrPosition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrToTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrToUnix": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NumberToStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FromBase": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Struct": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StructExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sum": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sqrt": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Stddev": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StddevPop": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StddevSamp": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeToStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeToTimeStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeToUnix": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeStrToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeStrToTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeStrToUnix": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Trim": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TsOrDsAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, 
"doc": 3}, "sqlglot.expressions.TsOrDsToDateStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TsOrDsToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TsOrDiToDi": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unhex": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UnixToStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UnixToTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UnixToTimeStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Upper": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Variance": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.VariancePop": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Week": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.XMLTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Year": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Use": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Merge": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.When": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NextValueFor": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.maybe_parse": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 270, "bases": 0, "doc": 231}, "sqlglot.expressions.union": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 187, "bases": 0, "doc": 216}, "sqlglot.expressions.intersect": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 187, "bases": 0, "doc": 216}, "sqlglot.expressions.except_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 187, "bases": 0, "doc": 217}, "sqlglot.expressions.select": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 138, "bases": 0, "doc": 206}, "sqlglot.expressions.from_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 136, "bases": 0, "doc": 205}, "sqlglot.expressions.update": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 234, "bases": 0, "doc": 263}, "sqlglot.expressions.delete": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 228, "bases": 0, "doc": 164}, "sqlglot.expressions.insert": {"qualname": 
1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 259, "bases": 0, "doc": 197}, "sqlglot.expressions.condition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 154, "bases": 0, "doc": 347}, "sqlglot.expressions.and_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 162, "bases": 0, "doc": 192}, "sqlglot.expressions.or_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 162, "bases": 0, "doc": 192}, "sqlglot.expressions.not_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 154, "bases": 0, "doc": 159}, "sqlglot.expressions.paren": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 70, "bases": 0, "doc": 122}, "sqlglot.expressions.to_identifier": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 69}, "sqlglot.expressions.to_interval": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 16}, "sqlglot.expressions.to_table": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 149, "bases": 0, "doc": 101}, "sqlglot.expressions.to_column": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 62}, "sqlglot.expressions.alias_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 242, "bases": 0, "doc": 305}, "sqlglot.expressions.subquery": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 182, "bases": 0, "doc": 188}, "sqlglot.expressions.column": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 208, "bases": 0, "doc": 74}, "sqlglot.expressions.cast": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 108, "bases": 0, "doc": 123}, "sqlglot.expressions.table_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 208, "bases": 0, "doc": 75}, "sqlglot.expressions.values": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 131, "bases": 0, "doc": 143}, "sqlglot.expressions.var": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 168}, "sqlglot.expressions.rename_table": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 75, "bases": 0, "doc": 50}, "sqlglot.expressions.convert": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 74}, "sqlglot.expressions.replace_children": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 57, "bases": 0, "doc": 18}, "sqlglot.expressions.column_table_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 127}, "sqlglot.expressions.table_name": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 147}, "sqlglot.expressions.replace_tables": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 62, "bases": 0, "doc": 183}, "sqlglot.expressions.replace_placeholders": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 56, "bases": 0, "doc": 246}, "sqlglot.expressions.expand": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 91, "bases": 0, "doc": 338}, "sqlglot.expressions.func": 
{"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 122, "bases": 0, "doc": 272}, "sqlglot.expressions.true": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 8}, "sqlglot.expressions.false": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 8}, "sqlglot.expressions.null": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 7}, "sqlglot.generator": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.generator.Generator": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 311}, "sqlglot.generator.Generator.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 270, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.generate": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 76, "bases": 0, "doc": 82}, "sqlglot.generator.Generator.unsupported": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sep": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.seg": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.pad_comment": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.maybe_comment": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 88, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.wrap": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.no_identify": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.normalize_func": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.indent": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sql": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 96, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.uncache_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cache_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.characterset_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.column_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.columnposition_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.columndef_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.columnconstraint_sql": {"qualname": 3, 
"fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 36, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.createable_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 88, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.create_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.clone_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.describe_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.prepend_ctes": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.with_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cte_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tablealias_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitstring_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.hexstring_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bytestring_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.rawstring_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.datatypesize_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.datatype_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.directory_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.delete_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.drop_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.except_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.except_op": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.fetch_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.filter_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.hint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.index_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.identifier_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.inputoutputformat_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.national_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.partition_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.properties_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.root_properties": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.properties": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 117, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.with_properties": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.locate_properties": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.property_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.likeproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.fallbackproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.journalproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.freespaceproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.checksumproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.datablocksizeproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lockingproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.withdataproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.insert_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.intersect_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.intersect_op": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.introducer_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.pseudotype_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.onconflict_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.returning_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.table_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tablesample_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.pivot_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tuple_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.update_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.values_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.var_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.into_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.from_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.group_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.having_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.join_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lambda_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 59, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lateral_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.limit_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.offset_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.setitem_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.set_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.pragma_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lock_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.literal_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.loaddata_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.null_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.boolean_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.order_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cluster_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.distribute_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sort_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.ordered_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.matchrecognize_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.query_modifiers": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.offset_limit_modifiers": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 102, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.after_having_modifiers": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.after_limit_modifiers": {"qualname": 
4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.select_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.schema_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.schema_columns_sql": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.star_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.parameter_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sessionparameter_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.placeholder_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.subquery_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.qualify_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.union_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.union_op": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.unnest_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.where_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.window_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.partition_by_sql": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 52, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.windowspec_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.withingroup_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.between_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bracket_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.all_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.any_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.exists_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.case_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, 
"bases": 0, "doc": 3}, "sqlglot.generator.Generator.constraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.nextvaluefor_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.extract_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.trim_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.safeconcat_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.check_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.foreignkey_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.primarykey_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.if_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.matchagainst_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.jsonobject_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.openjson_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.in_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.in_unnest_op": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.interval_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.return_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.reference_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.anonymous_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.paren_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.neg_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.not_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.alias_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.aliases_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.attimezone_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.add_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.and_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.connector_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiseand_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwisenot_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiseor_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwisexor_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cast_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.currentdate_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.collate_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.command_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.comment_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mergetreettl_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.transaction_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.commit_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.rollback_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.altercolumn_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.renametable_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.altertable_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.droppartition_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.addconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.distinct_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.ignorenulls_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.respectnulls_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.intdiv_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dpipe_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.safedpipe_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.div_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.overlaps_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.distance_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dot_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.eq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.escape_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.glob_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.gt_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.gte_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.ilike_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.ilikeany_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.is_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.like_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.likeany_sql": {"qualname": 3, "fullname": 5, 
"annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.similarto_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lt_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lte_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mod_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mul_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.neq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.nullsafeeq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.nullsafeneq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.or_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.slice_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sub_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.trycast_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.use_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.binary": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.function_fallback_sql": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.func": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 109, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.format_args": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.text_width": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.format_time": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.expressions": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 179, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.op_expressions": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 65, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.naked_property": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.set_operation": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.tag_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.token_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.joinhint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.kwarg_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.when_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.merge_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tochar_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dictproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dictrange_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dictsubproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.oncluster_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.cached_generator": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 65, "bases": 0, "doc": 7}, "sqlglot.helper": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.helper.AutoName": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 25}, "sqlglot.helper.seq_get": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 27}, "sqlglot.helper.ensure_list": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 66}, "sqlglot.helper.ensure_collection": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 66}, "sqlglot.helper.csv": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 55}, "sqlglot.helper.subclasses": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 103, "bases": 0, "doc": 84}, "sqlglot.helper.apply_index_offset": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 76, "bases": 0, "doc": 98}, "sqlglot.helper.camel_to_snake_case": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 16}, "sqlglot.helper.while_changing": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 58}, "sqlglot.helper.tsort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 47, "bases": 0, "doc": 53}, "sqlglot.helper.open_file": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 19}, "sqlglot.helper.csv_reader": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 53}, "sqlglot.helper.find_new_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 50}, "sqlglot.helper.name_sequence": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 20}, "sqlglot.helper.object_to_dict": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 12}, "sqlglot.helper.split_num_words": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 312}, "sqlglot.helper.is_iterable": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 132}, "sqlglot.helper.flatten": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 195}, "sqlglot.helper.dict_depth": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 194}, "sqlglot.helper.first": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 16}, "sqlglot.helper.case_sensitive": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 89, "bases": 0, "doc": 14}, "sqlglot.helper.should_identify": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 113, "bases": 0, "doc": 102}, "sqlglot.lineage": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.lineage.Node": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.lineage.Node.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 120, "bases": 0, "doc": 3}, "sqlglot.lineage.Node.walk": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 3}, "sqlglot.lineage.Node.to_html": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.lineage.lineage": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 253, "bases": 0, "doc": 106}, "sqlglot.lineage.LineageHTML": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 18}, "sqlglot.lineage.LineageHTML.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 132, "bases": 0, "doc": 3}, "sqlglot.optimizer": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types.annotate_types": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 215, "bases": 0, "doc": 305}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 170, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"qualname": 2, 
"fullname": 6, "annotation": 0, "default_value": 0, "signature": 28, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.canonicalize": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 46}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.coerce_type": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_ctes": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 166}, "sqlglot.optimizer.eliminate_joins": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 195}, "sqlglot.optimizer.eliminate_joins.join_condition": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 46}, "sqlglot.optimizer.eliminate_subqueries": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 278}, "sqlglot.optimizer.isolate_table_selects": {"qualname": 0, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"qualname": 3, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.merge_subqueries": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 276}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.normalize": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.normalize.normalize": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 64, "bases": 0, "doc": 188}, "sqlglot.optimizer.normalize.normalized": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, 
"sqlglot.optimizer.normalize.normalization_distance": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 171}, "sqlglot.optimizer.normalize.distributive_law": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 39}, "sqlglot.optimizer.normalize_identifiers": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 100, "bases": 0, "doc": 187}, "sqlglot.optimizer.optimize_joins": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 132}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 13}, "sqlglot.optimizer.optimize_joins.normalize": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 13}, "sqlglot.optimizer.optimize_joins.other_table_names": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.optimizer.optimizer": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.optimizer.optimize": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 580, "bases": 0, "doc": 221}, "sqlglot.optimizer.pushdown_predicates": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 198}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 20}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 31}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_projections": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 7, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 33, "bases": 0, "doc": 201}, "sqlglot.optimizer.qualify": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.qualify.qualify": {"qualname": 1, "fullname": 4, 
"annotation": 0, "default_value": 0, "signature": 350, "bases": 0, "doc": 392}, "sqlglot.optimizer.qualify_columns": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 118, "bases": 0, "doc": 232}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 13}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 118, "bases": 0, "doc": 14}, "sqlglot.optimizer.qualify_columns.Resolver": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 27}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 32, "bases": 0, "doc": 3}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 50}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"qualname": 4, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 13}, "sqlglot.optimizer.qualify_tables": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 313}, "sqlglot.optimizer.scope": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 5}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 8, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.CTE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.UNION": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 328}, "sqlglot.optimizer.scope.Scope.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 84, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope.clear_cache": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope.branch": {"qualname": 2, "fullname": 5, 
"annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 12}, "sqlglot.optimizer.scope.Scope.walk": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope.find": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 97}, "sqlglot.optimizer.scope.Scope.find_all": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 90}, "sqlglot.optimizer.scope.Scope.replace": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 63}, "sqlglot.optimizer.scope.Scope.tables": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 21}, "sqlglot.optimizer.scope.Scope.ctes": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 21}, "sqlglot.optimizer.scope.Scope.derived_tables": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 43}, "sqlglot.optimizer.scope.Scope.udtfs": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 24}, "sqlglot.optimizer.scope.Scope.subqueries": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 44}, "sqlglot.optimizer.scope.Scope.columns": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 36}, "sqlglot.optimizer.scope.Scope.selected_sources": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 69}, "sqlglot.optimizer.scope.Scope.cte_sources": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 24}, "sqlglot.optimizer.scope.Scope.selects": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 52}, "sqlglot.optimizer.scope.Scope.external_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 35}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 22}, "sqlglot.optimizer.scope.Scope.join_hints": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 30}, "sqlglot.optimizer.scope.Scope.source_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 17, "bases": 0, "doc": 52}, "sqlglot.optimizer.scope.Scope.is_subquery": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.optimizer.scope.Scope.is_union": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.optimizer.scope.Scope.is_cte": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.optimizer.scope.Scope.is_root": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.optimizer.scope.Scope.is_udtf": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, 
"doc": 14}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.optimizer.scope.Scope.rename_source": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 8}, "sqlglot.optimizer.scope.Scope.add_source": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 8}, "sqlglot.optimizer.scope.Scope.remove_source": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 8}, "sqlglot.optimizer.scope.Scope.traverse": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 28}, "sqlglot.optimizer.scope.Scope.ref_count": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 34}, "sqlglot.optimizer.scope.traverse_scope": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 330}, "sqlglot.optimizer.scope.build_scope": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 41}, "sqlglot.optimizer.scope.walk_in_scope": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 83}, "sqlglot.optimizer.simplify": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.simplify": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 138}, "sqlglot.optimizer.simplify.rewrite_between": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 31}, "sqlglot.optimizer.simplify.simplify_not": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 25}, "sqlglot.optimizer.simplify.flatten": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 24}, "sqlglot.optimizer.simplify.simplify_connectors": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.remove_compliments": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 19}, "sqlglot.optimizer.simplify.uniq_sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 23}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 58}, "sqlglot.optimizer.simplify.simplify_literals": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.simplify_parens": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.remove_where_true": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.always_true": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.is_complement": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, 
"sqlglot.optimizer.simplify.is_false": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.is_null": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.eval_boolean": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.extract_date": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.extract_interval": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.date_literal": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.boolean_literal": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.unnest_subqueries": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 227}, "sqlglot.optimizer.unnest_subqueries.unnest": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 3}, "sqlglot.parser": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.parser.parse_var_map": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 3}, "sqlglot.parser.parse_like": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 3}, "sqlglot.parser.binary_range_parser": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 93, "bases": 0, "doc": 3}, "sqlglot.parser.Parser": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 97}, "sqlglot.parser.Parser.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 78, "bases": 0, "doc": 3}, "sqlglot.parser.Parser.reset": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.parser.Parser.parse": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 87, "bases": 0, "doc": 72}, "sqlglot.parser.Parser.parse_into": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 159, "bases": 0, "doc": 111}, "sqlglot.parser.Parser.check_errors": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 16}, "sqlglot.parser.Parser.raise_error": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 57, "bases": 0, "doc": 22}, "sqlglot.parser.Parser.expression": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 74}, "sqlglot.parser.Parser.validate_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 68}, "sqlglot.planner": {"qualname": 
0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.planner.Plan": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.planner.Plan.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.planner.Step": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.planner.Step.from_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 209}, "sqlglot.planner.Step.add_dependency": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.planner.Step.to_s": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.planner.Scan": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.Scan.from_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 209}, "sqlglot.planner.Join": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.Join.from_joins": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 96, "bases": 0, "doc": 3}, "sqlglot.planner.Aggregate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.Sort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.SetOperation": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.SetOperation.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 83, "bases": 0, "doc": 3}, "sqlglot.planner.SetOperation.from_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 209}, "sqlglot.schema.Schema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 8}, "sqlglot.schema.Schema.add_table": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 187, "bases": 0, "doc": 83}, "sqlglot.schema.Schema.column_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 143, "bases": 0, "doc": 79}, "sqlglot.schema.Schema.get_column_type": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 149, "bases": 0, "doc": 79}, "sqlglot.schema.Schema.supported_table_args": {"qualname": 4, "fullname": 6, "annotation": 3, "default_value": 0, "signature": 0, "bases": 0, "doc": 16}, "sqlglot.schema.Schema.empty": {"qualname": 2, "fullname": 4, "annotation": 2, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.schema.AbstractMappingSchema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 87}, "sqlglot.schema.AbstractMappingSchema.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 3}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, 
"sqlglot.schema.AbstractMappingSchema.find": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 88, "bases": 0, "doc": 3}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 75, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 6, "doc": 155}, "sqlglot.schema.MappingSchema.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 146, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 47, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema.copy": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema.add_table": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 187, "bases": 0, "doc": 83}, "sqlglot.schema.MappingSchema.column_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 143, "bases": 0, "doc": 79}, "sqlglot.schema.MappingSchema.get_column_type": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 149, "bases": 0, "doc": 79}, "sqlglot.schema.ensure_schema": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 71, "bases": 0, "doc": 3}, "sqlglot.schema.ensure_column_mapping": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 70, "bases": 0, "doc": 3}, "sqlglot.schema.flatten_schema": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 69, "bases": 0, "doc": 3}, "sqlglot.schema.nested_get": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 71, "bases": 0, "doc": 86}, "sqlglot.schema.nested_set": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 276}, "sqlglot.serde": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.serde.dump": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 161, "bases": 0, "doc": 12}, "sqlglot.serde.load": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 161, "bases": 0, "doc": 16}, "sqlglot.time": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.time.format_time": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 108}, "sqlglot.tokens": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.tokens.TokenType.L_PAREN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.R_PAREN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.L_BRACKET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.R_BRACKET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 
0, "doc": 3}, "sqlglot.tokens.TokenType.L_BRACE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.R_BRACE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PLUS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DCOLON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SEMICOLON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.STAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BACKSLASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SLASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EQ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NEQ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AND": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AMP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DPIPE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PIPE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.CARET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TILDA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ARROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DARROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FARROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HASH_ARROW": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LR_ARROW": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LT_AT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AT_GT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DOLLAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PARAMETER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DAMP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BLOCK_START": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BLOCK_END": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SPACE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BREAK": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.STRING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NUMBER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IDENTIFIER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATABASE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLUMN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, 
"signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SCHEMA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TABLE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIT_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HEX_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BYTE_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RAW_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BOOLEAN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TINYINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UTINYINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SMALLINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.USMALLINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIGINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UBIGINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT128": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UINT128": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT256": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UINT256": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FLOAT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DOUBLE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DECIMAL": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NCHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VARCHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NVARCHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LONGTEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LONGBLOB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BINARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VARBINARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JSON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JSONB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIME": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIMESTAMP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATETIME": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATETIME64": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT4RANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT8RANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.INT8MULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NUMRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TSRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TSTZRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATERANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UUID": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NULLABLE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GEOMETRY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HLLSKETCH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HSTORE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SUPER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SERIAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIGSERIAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.XML": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MONEY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SMALLMONEY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROWVERSION": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IMAGE": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VARIANT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OBJECT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ENUM": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALIAS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALTER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALWAYS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ANTI": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ANY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.APPLY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ARRAY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ASC": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ASOF": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BEGIN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BETWEEN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CACHE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CASE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLLATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMAND": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMENT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMIT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CONSTRAINT": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CREATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CROSS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CUBE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_USER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DEFAULT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DELETE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DESC": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DESCRIBE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DICTIONARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DISTINCT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DIV": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DROP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ELSE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.END": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ESCAPE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EXCEPT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EXECUTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EXISTS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FALSE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FETCH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.FILTER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FINAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FIRST": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FOR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FORMAT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FROM": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FUNCTION": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GLOB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GLOBAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GROUP_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HAVING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IF": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ILIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INDEX": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INNER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INSERT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INTERSECT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INTERVAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INTO": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.INTRODUCER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IRLIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ISNULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JOIN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.KEEP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LANGUAGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LATERAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LEFT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LIKE_ANY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LIMIT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LOAD": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LOCK": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MAP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MERGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MOD": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NATURAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 13, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NOTNULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OFFSET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 
3}, "sqlglot.tokens.TokenType.ON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ORDER_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ORDERED": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ORDINALITY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OUTER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OVER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OVERLAPS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OVERWRITE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PARTITION": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PARTITION_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PERCENT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PIVOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PRAGMA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PROCEDURE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PROPERTIES": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.QUALIFY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.QUOTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RECURSIVE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.REPLACE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RETURNING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.REFERENCES": {"qualname": 2, "fullname": 4, "annotation": 0, 
"default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RIGHT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RLIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROLLBACK": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROLLUP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROWS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SELECT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SEMI": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SEPARATOR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SETTINGS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SHOW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SOME": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.STRUCT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TEMPORARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TOP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.THEN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TRUE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNCACHE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNION": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNNEST": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNPIVOT": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UPDATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.USE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.USING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VALUES": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VIEW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VOLATILE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WHEN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WHERE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WINDOW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WITH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNIQUE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.Token": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.Token.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 133, "bases": 0, "doc": 96}, "sqlglot.tokens.Token.number": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Token.string": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Token.identifier": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Token.var": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Tokenizer": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.Tokenizer.reset": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3}, "sqlglot.tokens.Tokenizer.tokenize": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 16}, "sqlglot.tokens.Tokenizer.peek": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.transforms": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.transforms.unalias_group": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 146}, "sqlglot.transforms.eliminate_distinct_on": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 66}, "sqlglot.transforms.eliminate_qualify": {"qualname": 2, "fullname": 4, "annotation": 0, 
"default_value": 0, "signature": 40, "bases": 0, "doc": 111}, "sqlglot.transforms.remove_precision_parameterized_types": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 33}, "sqlglot.transforms.unnest_to_explode": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 16}, "sqlglot.transforms.explode_to_unnest": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 12}, "sqlglot.transforms.remove_target_from_merge": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 11}, "sqlglot.transforms.remove_within_group_for_percentiles": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.transforms.add_recursive_cte_column_names": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.transforms.epoch_cast_to_ts": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.transforms.preprocess": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 94, "bases": 0, "doc": 84}, "sqlglot.trie": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.trie.new_trie": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 200}, "sqlglot.trie.in_trie": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 47, "bases": 0, "doc": 299}}, "length": 1866, "save": true}, "index": {"qualname": {"root": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, 
"sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 52, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.pretty": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}}, "df": 9}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}}, "df": 2}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": 
{"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 22}}}, "y": {"docs": {"sqlglot.expressions.Property": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 3}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.expressions.Pragma": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 11, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 
1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 31}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}}, "df": 5, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}}, "df": 2}}, "s": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": 
{"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.expressions.ParameterizedAgg": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}}, "df": 7, "t": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PathColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "d": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PERCENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PercentileCont": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.PercentileDisc": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.Tokenizer.peek": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}}, "df": 6}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PIPE": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"sqlglot.planner.Plan": {"tf": 1}, 
"sqlglot.planner.Plan.__init__": {"tf": 1}}, "df": 2}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PLUS": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}}, "df": 7, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}}, "df": 5}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Posexplode": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}}, "df": 2}, "w": {"docs": {"sqlglot.expressions.Pow": {"tf": 1}}, "df": 1}}, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}}, "df": 17}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 
0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 5}}}}}}}}, "s": {"docs": {"sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}}, "df": 15, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SchemaError": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SchemaCommentProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}}, "df": 5}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 36, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 7}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"2": {"docs": {"sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}}, "df": 7}, "docs": {"sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}}, "df": 5, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}}, "df": 4}}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SPACE": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Split": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}}}}, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, 
"sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, 
"sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": 
{"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, 
"sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}}, "df": 288, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}}, "df": 10}}}, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SqlSecurityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Sqrt": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}}, "df": 26, "s": {"docs": {"sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}}, "df": 13, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.expressions.SetTag": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SettingsProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, 
"df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SetProperty": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.SetAgg": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}}}}}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.SerdeProperties": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "p": {"docs": {"sqlglot.generator.Generator.sep": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {"sqlglot.generator.Generator.seg": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.name_sequence": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.case_sensitive": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.SEMI": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, 
"sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}}, "df": 8, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortArray": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SOME": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}}, "df": 3}, "b": {"docs": {"sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}}, "df": 4}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 10, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SubqueryPredicate": {"tf": 1}}, 
"df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 4}}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}}, "df": 2}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}}, "df": 8, "t": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.startswith": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}}, "df": 4}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StarMap": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor.python.PythonExecutor.static": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.StabilityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.StandardHash": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 3, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, 
"sqlglot.tokens.TokenType.STRUCT": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.StructExtract": {"tf": 1}}, "df": 1}}}}}}}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}}, "df": 3}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}}, "df": 9}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToDate": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToTime": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.StrToUnix": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.Stddev": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevPop": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevSamp": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.expressions.SafeConcat": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}}, "df": 3}}}}}}, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SafeDPipe": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": 
{"docs": {"sqlglot.expressions.SafeDivide": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.same_parent": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 10}}}}}}}, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}}, "df": 3}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.should_identify": {"tf": 1}}, "df": 1}}}}, "a": {"2": {"docs": {"sqlglot.expressions.SHA2": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.expressions.SHA": {"tf": 1}}, "df": 1}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}}, "df": 2}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}}, "df": 2}}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}}, "df": 2}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.simplify.simplify": 
{"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}}, "df": 5}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.SLASH": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.parse_one": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.expressions.OnCluster": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}}, "df": 3}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.OnConflict": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.OnCommitProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}}, "df": 9, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}}, "df": 4, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}}, "df": 4}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}}, "df": 8}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Overlaps": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}}, "df": 3}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 13, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.OptimizeError": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1, "j": {"docs": {}, "df": 0, "s": {"docs": 
{}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.OpenJSON": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.OpenJSONColumnDef": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}}, "df": 1, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}}, "df": 9}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}}, "df": 10}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OUTER": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}}, "df": 3}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}}, "df": 5}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 
1}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.TransientProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}}, "df": 4}}, "m": {"docs": {"sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}}, "df": 3}, "e": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 2}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.expressions.true": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 
30, "a": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}}, "df": 6}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}}, "df": 7, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}}, "df": 4}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}}, "df": 2}}}}}}}, "g": {"docs": {"sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 23, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "z": {"docs": {}, 
"df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 2, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.tokens.Tokenizer.peek": {"tf": 1}}, "df": 22}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.TokenError": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, 
"sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, 
"sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 
1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, 
"sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 289}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ToTableProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.ToChar": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.ToBase64": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}, "p": {"docs": {"sqlglot.tokens.TokenType.TOP": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}}, "df": 11}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}}, "df": 5}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.expressions.TemporaryProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 2, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}}, "df": 8}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}, "t": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TsOrDsAdd": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TsOrDsToDate": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.expressions.TsOrDiToDi": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}}, "df": 9, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": 
{"sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}}, "df": 2}}}}, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimestampAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimestampSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimestampDiff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TimeStrToDate": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeStrToUnix": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimeSub": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimeAdd": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.TimeTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToTimeStr": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeToUnix": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, 
"df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TitleColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.TILDA": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}}, "df": 5}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.this": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.THEN": {"tf": 1}}, "df": 1}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 84, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}}, "df": 3}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, 
"df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}}, "df": 3}}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.CROSS": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}}, "df": 6}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 6, "i": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.CountIf": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 53, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}}, "df": 3}}}, "s": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 11}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.ColumnPosition": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}}, "df": 2}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}}, "df": 2, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.ColumnConstraintKind": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": 
{}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CollateColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CollateProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLON": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}}, "df": 4}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.expressions.Comment": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.expressions.Expression.add_comments": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CommentColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "a": {"docs": {"sqlglot.tokens.TokenType.COMMA": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Command": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}}, "df": 3}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CompressColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}}, "df": 4}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}}, "df": 4, "w": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ConcatWs": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}}, "df": 12}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 15}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.convert": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot.generator.cached_generator": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 10, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CastToStrType": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}}, "df": 7, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CARET": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}}, "df": 12}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Clone": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}}, "df": 2}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}}, "df": 3}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}}, "df": 4}}, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Select.ctas": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}}, "df": 6, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDatetime": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.CurrentTimestamp": {"tf": 1}}, "df": 1}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.CurrentUser": {"tf": 1}}, "df": 1}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.tokens.TokenType.CUBE": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 3}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CharacterSetProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Check": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CheckColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.replace_children": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 
3}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Ceil": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 33, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 5}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}}, "df": 3}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 7}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}}, "df": 5}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DATABASE": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}}, "df": 85, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataTypeSize": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.Date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, 
"sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}}, "df": 12, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}}, "df": 2}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DateSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}}, "df": 3}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.DateFromParts": {"tf": 1}}, "df": 1}}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DatetimeAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DatetimeSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.DatetimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DatetimeTrunc": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DateTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DateToDateStr": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot.expressions.DateToDi": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, 
"sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DateAdd": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {"sqlglot.expressions.Day": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.DayOfWeek": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DayOfMonth": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DayOfYear": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DASH": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.DARROW": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.DAMP": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 7}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistStyleProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, 
"df": 3}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}}, "df": 11, "s": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}}, "df": 22}}}}}}, "v": {"docs": {"sqlglot.expressions.Div": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}}, "df": 2}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 3, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DictProperty": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.expressions.DictSubProperty": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DictRange": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DiToDate": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}}, "df": 1}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}}, "df": 6}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}}, "df": 4, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}}, "df": 4}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, 
"sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.planner.Step.add_dependency": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DerivedTable": {"tf": 1}}, "df": 1}}}}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DefinerProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Decode": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}}, "df": 7}}}}, "m": {"docs": {}, "df": 0, "p": 
{"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}}, "df": 4}}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.dfs": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}}, "df": 6}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.DCOLON": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": 
{"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 35, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Initcap": {"tf": 1}}, "df": 1}}}}}, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}, "8": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}, "docs": {"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, 
"sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}}, "df": 10}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}}, "df": 5}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}}, "df": 3}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.indent": {"tf": 
1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.InputOutputFormat": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}}, "df": 2}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INNER": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}}, "df": 25, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}}, "df": 7, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}}, "df": 2}}}}}}}, "f": {"docs": {"sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}}, "df": 7}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.is_iterable": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}}, "df": 2}}}}, "y": {"docs": {"sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, 
"i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}}, "df": 9, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {"sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}}, "df": 6}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ALTER": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}}, "df": 2}}}}}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AlgorithmProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.AggFunc": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}}, "df": 3}}}}}}}, "y": {"docs": 
{"sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}}, "df": 6, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AnyValue": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.find_ancestor": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.ANTI": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.expressions.Condition.as_": {"tf": 1}}, "df": 1, "c": {"docs": {"sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}}, "df": 4}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.tokens.TokenType.ASOF": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.ArrayAll": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ArrayAny": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ArrayOverlaps": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArrayConcat": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ArrayContains": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.ArrayContained": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": 
{"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.ArrayFilter": {"tf": 1}}, "df": 1}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.ArrayJoin": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ArraySize": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArraySort": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.ArraySum": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ArrayUnionAgg": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 6}}}, "g": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 2}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ApproxDistinct": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ApproxQuantile": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, 
"sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 11, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AutoIncrementProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.AutoName": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Abs": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}}, "df": 5}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.AMP": {"tf": 1}}, "df": 1}}}, "w": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}}, "df": 7}}, "n": {"docs": {"sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}}, "df": 13, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 1, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}}, "df": 2}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.WithDataProperty": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.WithJournalTableProperty": {"tf": 1}}, "df": 
1}}}}}}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}}, "df": 9, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}}, "df": 10}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.text_width": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ErrorLevel.WARN": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Week": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.WeekOfYear": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.wrap": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}}, "df": 6}}}, "l": {"docs": {"sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FileFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "s": 
{"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}}, "df": 4}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 7}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Final": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}}, "df": 10, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FULL": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 4, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}}, "df": 6}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 4}}}}}, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Floor": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.From": {"tf": 1}, 
"sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 13, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.FromBase64": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.FromBase": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.false": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}}, "df": 3}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.FARROW": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 6, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": 
{"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}}, "df": 10}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, 
"sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, 
"sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, 
"sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": 
{"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, 
"sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}}, "df": 320}}, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 5, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}}, "df": 2}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 8}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.expressions.GT": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}}, "df": 3}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}}, "df": 15, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 4}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}}, "df": 4, "b": {"docs": {"sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.JSONBContains": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONBExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONBExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.JSONKeyValue": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}}, "df": 2}}}}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONObject": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONFormat": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}}, "df": 12, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, 
"df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}}, "df": 4}}}}, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {"sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UnixToTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToTimeStr": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 2}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 12}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.UnsupportedError": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}}, "df": 3}}}}}, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Unhex": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Upper": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Use": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}}, "df": 3, "r": {"docs": {"sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}}, "df": 2}}}}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}}, "df": 2}}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {"sqlglot.expressions.Exp": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, 
"sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 46, "s": {"docs": {"sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 4}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.expand": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}}, "df": 7}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.expressions.ExternalProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ExecuteAsProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}}, "df": 3}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 9}}}}, "d": {"docs": {"sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.endswith": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Encode": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.EngineProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": 
{"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 5}}}}}, "s": {"docs": {"sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 2}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 3}}}, "q": {"docs": {"sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 5}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 6}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.schema.Schema.empty": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}}, "df": 9}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Repeat": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, 
"df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 8}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reduce": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 3}}}}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Return": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Returning": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}}, "df": 4}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 9}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 
0, "e": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}}, "df": 2}}}}}, "f": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}}, "df": 5}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.ReadCSV": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.RegexpExtract": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpLike": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpILike": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.RegexpSplit": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.ROWS": 
{"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.RowNumber": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}}, "df": 4}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}}, "df": 4}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Round": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}}, "df": 3, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.expressions.RangeN": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}, "w": {"docs": {"sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.RawString": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}}, "df": 2}}}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.expressions.Right": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}}, "df": 11}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}}, "df": 1}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, 
"sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}}, "df": 7, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}}, "df": 1, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}}, "df": 2, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.LastDateOfMonth": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}}, "df": 4}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LanguageProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "w": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": 
{"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}}, "df": 9, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LocationProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "k": {"docs": {"sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LockingProperty": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}}, "df": 2}}}}}}, "g": {"1": {"0": {"docs": {"sqlglot.expressions.Log10": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"docs": {"sqlglot.expressions.Log2": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.expressions.Log": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LogProperty": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.LogicalOr": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.LogicalAnd": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Lower": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.expressions.Left": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, 
"sqlglot.expressions.Least": {"tf": 1}}, "df": 3}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Length": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Levenshtein": {"tf": 1}}, "df": 1}}}}}}}}}}, "t": {"docs": {"sqlglot.expressions.LT": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}}, "df": 3}}, "n": {"docs": {"sqlglot.expressions.Ln": {"tf": 1}}, "df": 1}, "r": {"docs": {"sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}}, "df": 9}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}}, "df": 6}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}}, "df": 2}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Hll": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 8, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.MergeTreeTTL": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.MergeTreeTTLAction": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}}, "df": 3}, "p": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}}, "df": 5, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.MappingSchema.from_mapping_schema": 
{"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 7}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.MatchAgainst": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.MaterializedProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "y": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}}, "df": 7}}}}}}}, "v": {"docs": {}, 
"df": 0, "e": {"docs": {"sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}}, "df": 2}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Month": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}}, "df": 6}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}}, "df": 2}}, "d": {"5": {"docs": {"sqlglot.expressions.MD5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}}, "df": 7}}}}, "g": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}}, "df": 13}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}}, "df": 3, "s": {"docs": {}, "df": 0, "t": {"docs": 
{}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "x": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}}, "df": 6}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BEGIN": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.bfs": {"tf": 1}}, "df": 1}}, "y": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}}, "df": 8, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": 
{"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}}, "df": 6}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}}, "df": 4}}}, "e": {"docs": {"sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BREAK": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}}, "df": 16, "s": {"docs": {"sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, 
"sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.National": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATURAL": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.NumberToStr": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": 
{"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}}, "df": 10, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot.optimizer.normalize.normalized": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {"sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}}, "df": 6, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 4}}}}, "q": {"docs": {"sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}}, "df": 4}, "g": {"docs": {"sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}}, "df": 2}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}}, "df": 2, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.NextValueFor": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}}, "df": 2}}}}}}, "l": {"2": {"docs": {"sqlglot.expressions.Nvl2": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}}, "df": 2}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Variance": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VariancePop": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VarMap": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, 
"t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.VolatileProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.VIEW": {"tf": 1}}, "df": 1}}}}, "x": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.expressions.XMLTable": {"tf": 1}}, "df": 2}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}}, "df": 3}}, "y": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 4}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}}, "df": 2}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 9}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Quantile": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.query_modifiers": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}}, "df": 2}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Year": {"tf": 1}}, "df": 1}}}}}}, "fullname": {"root": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, 
"sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 52, "s": {"docs": {"sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 1, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 
1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, 
"sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, 
"sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, 
"sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, 
"sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}}, "df": 393, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.pretty": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 
1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": 
{"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.drill": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, 
"sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.dialects.redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks": {"tf": 
1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.trino": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.errors": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, 
"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env": {"tf": 1}, "sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.executor.python": {"tf": 1}, "sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 
1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.DerivedTable": {"tf": 1}, "sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Clone": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.expressions.Pragma": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.expressions.RawString": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.ColumnPosition": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.expressions.SetTag": {"tf": 1}, "sqlglot.expressions.Comment": 
{"tf": 1}, "sqlglot.expressions.MergeTreeTTLAction": {"tf": 1}, "sqlglot.expressions.MergeTreeTTL": {"tf": 1}, "sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1}, "sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.expressions.CompressColumnConstraint": {"tf": 1}, "sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.expressions.TitleColumnConstraint": {"tf": 1}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1}, "sqlglot.expressions.PathColumnConstraint": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.expressions.Check": {"tf": 1}, "sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.OnConflict": {"tf": 1}, "sqlglot.expressions.Returning": {"tf": 1}, "sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.expressions.National": {"tf": 1}, "sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.expressions.Final": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.expressions.Property": {"tf": 1}, "sqlglot.expressions.AlgorithmProperty": {"tf": 1}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1}, 
"sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1}, "sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.expressions.CollateProperty": {"tf": 1}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.expressions.DefinerProperty": {"tf": 1}, "sqlglot.expressions.DistKeyProperty": {"tf": 1}, "sqlglot.expressions.DistStyleProperty": {"tf": 1}, "sqlglot.expressions.EngineProperty": {"tf": 1}, "sqlglot.expressions.ToTableProperty": {"tf": 1}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1}, "sqlglot.expressions.ExternalProperty": {"tf": 1}, "sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.expressions.FileFormatProperty": {"tf": 1}, "sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.expressions.InputOutputFormat": {"tf": 1}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.expressions.LanguageProperty": {"tf": 1}, "sqlglot.expressions.DictProperty": {"tf": 1}, "sqlglot.expressions.DictSubProperty": {"tf": 1}, "sqlglot.expressions.DictRange": {"tf": 1}, "sqlglot.expressions.OnCluster": {"tf": 1}, "sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.expressions.LocationProperty": {"tf": 1}, "sqlglot.expressions.LockingProperty": {"tf": 1}, "sqlglot.expressions.LogProperty": {"tf": 1}, "sqlglot.expressions.MaterializedProperty": {"tf": 1}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1}, "sqlglot.expressions.OnCommitProperty": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}, "sqlglot.expressions.RowFormatProperty": {"tf": 1}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1}, "sqlglot.expressions.SerdeProperties": {"tf": 1}, "sqlglot.expressions.SetProperty": {"tf": 1}, "sqlglot.expressions.SettingsProperty": {"tf": 1}, "sqlglot.expressions.SortKeyProperty": {"tf": 1}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1}, "sqlglot.expressions.StabilityProperty": {"tf": 1}, "sqlglot.expressions.TemporaryProperty": {"tf": 1}, "sqlglot.expressions.TransientProperty": {"tf": 1}, "sqlglot.expressions.VolatileProperty": {"tf": 1}, "sqlglot.expressions.WithDataProperty": {"tf": 1}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Return": {"tf": 1}, "sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, 
"sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataTypeSize": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.PseudoType": {"tf": 1}, 
"sqlglot.expressions.SubqueryPredicate": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.expressions.Command": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.expressions.Div": {"tf": 1}, "sqlglot.expressions.Overlaps": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.expressions.SafeDPipe": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.expressions.ArrayOverlaps": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}, "sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.AggFunc": {"tf": 1}, "sqlglot.expressions.ParameterizedAgg": {"tf": 1}, "sqlglot.expressions.Abs": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.expressions.Hll": {"tf": 1}, "sqlglot.expressions.ApproxDistinct": {"tf": 1}, "sqlglot.expressions.Array": 
{"tf": 1}, "sqlglot.expressions.ToChar": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}, "sqlglot.expressions.ArrayAll": {"tf": 1}, "sqlglot.expressions.ArrayAny": {"tf": 1}, "sqlglot.expressions.ArrayConcat": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayContained": {"tf": 1}, "sqlglot.expressions.ArrayFilter": {"tf": 1}, "sqlglot.expressions.ArrayJoin": {"tf": 1}, "sqlglot.expressions.ArraySize": {"tf": 1}, "sqlglot.expressions.ArraySort": {"tf": 1}, "sqlglot.expressions.ArraySum": {"tf": 1}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}, "sqlglot.expressions.AnyValue": {"tf": 1}, "sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.CastToStrType": {"tf": 1}, "sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.expressions.Ceil": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.expressions.SafeConcat": {"tf": 1}, "sqlglot.expressions.ConcatWs": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.expressions.CountIf": {"tf": 1}, "sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.expressions.CurrentDatetime": {"tf": 1}, "sqlglot.expressions.CurrentTime": {"tf": 1}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1}, "sqlglot.expressions.CurrentUser": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DateTrunc": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.DayOfWeek": {"tf": 1}, "sqlglot.expressions.DayOfMonth": {"tf": 1}, "sqlglot.expressions.DayOfYear": {"tf": 1}, "sqlglot.expressions.WeekOfYear": {"tf": 1}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.DateFromParts": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}, "sqlglot.expressions.DateToDateStr": {"tf": 1}, "sqlglot.expressions.DateToDi": {"tf": 1}, "sqlglot.expressions.Date": {"tf": 1}, "sqlglot.expressions.Day": {"tf": 1}, "sqlglot.expressions.Decode": {"tf": 1}, "sqlglot.expressions.DiToDate": {"tf": 1}, "sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.expressions.Exp": {"tf": 1}, "sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.expressions.Floor": {"tf": 1}, "sqlglot.expressions.FromBase64": {"tf": 1}, "sqlglot.expressions.ToBase64": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}, "sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.expressions.Initcap": {"tf": 1}, "sqlglot.expressions.JSONKeyValue": {"tf": 1}, "sqlglot.expressions.JSONObject": {"tf": 1}, 
"sqlglot.expressions.OpenJSONColumnDef": {"tf": 1}, "sqlglot.expressions.OpenJSON": {"tf": 1}, "sqlglot.expressions.JSONBContains": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.JSONExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONBExtract": {"tf": 1}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONFormat": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}, "sqlglot.expressions.Left": {"tf": 1}, "sqlglot.expressions.Right": {"tf": 1}, "sqlglot.expressions.Length": {"tf": 1}, "sqlglot.expressions.Levenshtein": {"tf": 1}, "sqlglot.expressions.Ln": {"tf": 1}, "sqlglot.expressions.Log": {"tf": 1}, "sqlglot.expressions.Log2": {"tf": 1}, "sqlglot.expressions.Log10": {"tf": 1}, "sqlglot.expressions.LogicalOr": {"tf": 1}, "sqlglot.expressions.LogicalAnd": {"tf": 1}, "sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.expressions.StarMap": {"tf": 1}, "sqlglot.expressions.VarMap": {"tf": 1}, "sqlglot.expressions.MatchAgainst": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}, "sqlglot.expressions.MD5": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}, "sqlglot.expressions.Month": {"tf": 1}, "sqlglot.expressions.Nvl2": {"tf": 1}, "sqlglot.expressions.Posexplode": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}, "sqlglot.expressions.PercentileCont": {"tf": 1}, "sqlglot.expressions.PercentileDisc": {"tf": 1}, "sqlglot.expressions.Quantile": {"tf": 1}, "sqlglot.expressions.ApproxQuantile": {"tf": 1}, "sqlglot.expressions.RangeN": {"tf": 1}, "sqlglot.expressions.ReadCSV": {"tf": 1}, "sqlglot.expressions.Reduce": {"tf": 1}, "sqlglot.expressions.RegexpExtract": {"tf": 1}, "sqlglot.expressions.RegexpLike": {"tf": 1}, "sqlglot.expressions.RegexpILike": {"tf": 1}, "sqlglot.expressions.RegexpSplit": {"tf": 1}, "sqlglot.expressions.Repeat": {"tf": 1}, "sqlglot.expressions.Round": {"tf": 1}, "sqlglot.expressions.RowNumber": {"tf": 1}, "sqlglot.expressions.SafeDivide": {"tf": 1}, "sqlglot.expressions.SetAgg": {"tf": 1}, "sqlglot.expressions.SHA": {"tf": 1}, "sqlglot.expressions.SHA2": {"tf": 1}, "sqlglot.expressions.SortArray": {"tf": 1}, "sqlglot.expressions.Split": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}, "sqlglot.expressions.StandardHash": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}, "sqlglot.expressions.StrToDate": {"tf": 1}, "sqlglot.expressions.StrToTime": {"tf": 1}, "sqlglot.expressions.StrToUnix": {"tf": 1}, "sqlglot.expressions.NumberToStr": {"tf": 1}, "sqlglot.expressions.FromBase": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.expressions.StructExtract": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}, "sqlglot.expressions.Sqrt": {"tf": 1}, "sqlglot.expressions.Stddev": {"tf": 1}, "sqlglot.expressions.StddevPop": {"tf": 1}, "sqlglot.expressions.StddevSamp": {"tf": 1}, "sqlglot.expressions.TimeToStr": {"tf": 1}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1}, "sqlglot.expressions.TimeToUnix": {"tf": 1}, "sqlglot.expressions.TimeStrToDate": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1}, "sqlglot.expressions.Unhex": {"tf": 1}, "sqlglot.expressions.UnixToStr": {"tf": 1}, "sqlglot.expressions.UnixToTime": {"tf": 1}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1}, 
"sqlglot.expressions.Upper": {"tf": 1}, "sqlglot.expressions.Variance": {"tf": 1}, "sqlglot.expressions.VariancePop": {"tf": 1}, "sqlglot.expressions.Week": {"tf": 1}, "sqlglot.expressions.XMLTable": {"tf": 1}, "sqlglot.expressions.Year": {"tf": 1}, "sqlglot.expressions.Use": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.expressions.NextValueFor": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, 
"sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, 
"sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, 
"sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, 
"sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage": {"tf": 1}, "sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer": {"tf": 1}, "sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.optimizer.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, 
"sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.optimizer": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}, "sqlglot.parser": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner": {"tf": 1}, 
"sqlglot.planner.Plan": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.serde": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, 
"sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, 
"sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, 
"sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, 
"sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.tokens.Tokenizer.peek": {"tf": 1}, "sqlglot.transforms": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 1866, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.sqlite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1.4142135623730951}}, "df": 11}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SqlSecurityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Sqrt": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.empty": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}}, "df": 29, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SchemaError": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SchemaCommentProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.from_expression": 
{"tf": 1}}, "df": 5}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 44, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": 
{"tf": 1}}, "df": 7}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"2": {"docs": {"sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1.4142135623730951}}, "df": 8}, "docs": {"sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.spark": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1.4142135623730951}}, "df": 6, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}}, "df": 4}}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SPACE": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Split": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}}, "df": 26, "s": {"docs": {"sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, 
"df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}}, "df": 13, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.expressions.SetTag": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SettingsProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SetProperty": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.SetAgg": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}}}}}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.serde": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 4, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.SerdeProperties": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "p": {"docs": {"sqlglot.generator.Generator.sep": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {"sqlglot.generator.Generator.seg": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.name_sequence": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.case_sensitive": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.SEMI": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}}, "df": 8, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortArray": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SOME": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}}, "df": 3}, "b": {"docs": {"sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": 
{"sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}}, "df": 4}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 10, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SubqueryPredicate": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 11}}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}}, "df": 2}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}}, "df": 8, "t": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, 
"i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.startswith": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.starrocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}}, "df": 5}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StarMap": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor.python.PythonExecutor.static": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.StabilityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.StandardHash": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 3, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.StructExtract": {"tf": 1}}, "df": 1}}}}}}}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}}, "df": 3}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}}, "df": 9}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToDate": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.expressions.StrToTime": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.StrToUnix": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.Stddev": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevPop": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevSamp": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.expressions.SafeConcat": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}}, "df": 3}}}}}}, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SafeDPipe": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SafeDivide": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.same_parent": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.4142135623730951}}, "df": 11}}}}}}}, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.camel_to_snake_case": 
{"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}}, "df": 3}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.should_identify": {"tf": 1}}, "df": 1}}}}, "a": {"2": {"docs": {"sqlglot.expressions.SHA2": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.expressions.SHA": {"tf": 1}}, "df": 1}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}}, "df": 2}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}}, "df": 2}}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}}, "df": 2}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 21}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Slice": 
{"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.SLASH": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.pretty": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.presto": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1.4142135623730951}}, "df": 10}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 8}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, 
"sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 22}}}, "y": {"docs": {"sqlglot.expressions.Property": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 3}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.expressions.Pragma": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 11, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, 
"sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.parser": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.reset": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.check_errors": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}}, "df": 34}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}}, "df": 5, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}}, "df": 2}}, "s": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.expressions.ParameterizedAgg": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}}, "df": 7, "t": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PathColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "d": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PERCENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PercentileCont": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.PercentileDisc": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.Tokenizer.peek": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}}, "df": 6}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PIPE": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, 
"d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"sqlglot.planner.Plan": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}}, "df": 2, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.planner": {"tf": 1}, "sqlglot.planner.Plan": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 16}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PLUS": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}}, "df": 7, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}}, "df": 6}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Posexplode": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}}, "df": 2}, "w": {"docs": {"sqlglot.expressions.Pow": {"tf": 1}}, "df": 1}}, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python": {"tf": 1}, "sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, 
"sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}}, "df": 21, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}}, "df": 17}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}}, 
"df": 10}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.parse_one": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.expressions.OnCluster": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}}, "df": 3}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.OnConflict": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.OnCommitProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}}, "df": 9, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}}, "df": 4, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}}, "df": 4}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1.4142135623730951}}, "df": 9}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Overlaps": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}}, "df": 3}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 13, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.OptimizeError": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.optimizer": {"tf": 1}, "sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.optimizer.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": 
{"tf": 1}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.optimizer": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 131}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 
1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.OpenJSON": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.OpenJSONColumnDef": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}}, "df": 1, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}}, "df": 9}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}}, "df": 10}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OUTER": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}}, "df": 3}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, 
"sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}}, "df": 5}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.transforms": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 12}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.TransientProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.trino": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1.4142135623730951}}, "df": 5}}, "m": {"docs": {"sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}}, "df": 3}, "e": {"docs": {"sqlglot.trie": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 3}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.expressions.true": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1.4142135623730951}, 
"sqlglot.executor.table.Table.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.add_columns": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.append": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.pop": {"tf": 1.4142135623730951}, "sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 40, "a": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}}, "df": 7}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}}, "df": 4}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 
1}}, "df": 2}}}}}}}, "g": {"docs": {"sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 23, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 2, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.tokens.Tokenizer.peek": {"tf": 1}}, "df": 22}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.TokenError": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.tokens": {"tf": 1}, 
"sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": 
{"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, 
"sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, 
"sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.tokens.Tokenizer.peek": {"tf": 1}}, "df": 300}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": 
{"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, 
"sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, 
"sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, 
"sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 289}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ToTableProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.ToChar": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": 
{"sqlglot.expressions.ToBase64": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}, "p": {"docs": {"sqlglot.tokens.TokenType.TOP": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1.4142135623730951}}, "df": 12}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}}, "df": 5}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.TemporaryProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 2, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.tsql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1.4142135623730951}}, "df": 10}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, 
"sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}, "t": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TsOrDsAdd": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TsOrDsToDate": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.expressions.TsOrDiToDi": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}}, "df": 10, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}}, "df": 2}}}}, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimestampAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimestampSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimestampDiff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": 
{}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TimeStrToDate": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeStrToUnix": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimeSub": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimeAdd": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.TimeTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToTimeStr": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeToUnix": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TitleColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.TILDA": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}}, "df": 5}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.this": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.THEN": {"tf": 1}}, "df": 1}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": 
{"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 84, "s": {"docs": {"sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 6}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}}, "df": 3}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 
1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 
1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 111, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 5}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}}, "df": 3}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 7}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}}, "df": 6}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DATABASE": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}}, "df": 85, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataTypeSize": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.Date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}}, "df": 12, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}}, "df": 2}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DateSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}}, "df": 3}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.DateFromParts": {"tf": 1}}, "df": 1}}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DatetimeAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DatetimeSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.DatetimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DatetimeTrunc": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DateTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DateToDateStr": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot.expressions.DateToDi": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, 
"sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DateAdd": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {"sqlglot.expressions.Day": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.DayOfWeek": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DayOfMonth": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DayOfYear": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DASH": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.DARROW": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.DAMP": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 7}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistStyleProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 3}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, 
"sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialect": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 
1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}}, "df": 71, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.drill": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.dialects.redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.trino": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}}, "df": 226}}}}}}, "v": {"docs": {"sqlglot.expressions.Div": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1}}}}, "f": 
{"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1.4142135623730951}}, "df": 15}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}}, "df": 2}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 3, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DictProperty": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DictSubProperty": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DictRange": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DiToDate": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}}, "df": 1}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": 
{"docs": {"sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.drill": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1.4142135623730951}}, "df": 7}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}}, "df": 4, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}}, "df": 4}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.planner.Step.add_dependency": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DerivedTable": {"tf": 1}}, "df": 1}}}}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DefinerProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Decode": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.duckdb": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1.4142135623730951}}, "df": 8}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}}, "df": 4}}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.dfs": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}}, "df": 6}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.tokens.TokenType.DCOLON": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}}, "df": 3}}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.CROSS": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}}, "df": 6}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 6, "i": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.CountIf": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 53, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}}, "df": 3}}}, "s": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 16}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, 
"o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.ColumnPosition": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}}, "df": 2}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}}, "df": 2, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.ColumnConstraintKind": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CollateColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CollateProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLON": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}}, "df": 4}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.expressions.Comment": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.expressions.Expression.add_comments": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CommentColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "a": {"docs": {"sqlglot.tokens.TokenType.COMMA": {"tf": 1}}, "df": 1, "n": {"docs": {}, 
"df": 0, "d": {"docs": {"sqlglot.expressions.Command": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}}, "df": 3}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CompressColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}}, "df": 4}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}}, "df": 4, "w": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ConcatWs": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.eval": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.add_columns": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.filter": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.sort": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.set_row": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.set_index": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.set_range": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}}, "df": 13}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, 
"sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 15}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.convert": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot.generator.cached_generator": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 10, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CastToStrType": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}}, "df": 7, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, 
"df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}}, "df": 6}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CARET": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}}, "df": 13}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Clone": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}}, "df": 2}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}}, "df": 3}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, 
"sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}}, "df": 5}}, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Select.ctas": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}}, "df": 6, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDatetime": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.CurrentTimestamp": {"tf": 1}}, "df": 1}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.CurrentUser": {"tf": 1}}, "df": 1}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CUBE": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 3}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CharacterSetProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Check": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CheckColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.replace_children": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Ceil": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, 
"sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 35, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Initcap": {"tf": 1}}, "df": 1}}}}}, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}, "8": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}, "docs": {"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, 
"sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}}, "df": 10}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}}, "df": 5}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}}, "df": 3}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.InputOutputFormat": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}}, "df": 2}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INNER": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}}, "df": 25, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, 
"sqlglot.expressions.Tuple.isin": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.4142135623730951}}, "df": 2, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}}, "df": 7, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}}, "df": 2}}}}}}}, "f": {"docs": {"sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}}, "df": 7}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.is_iterable": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, 
"sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.optimizer.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}}, "df": 3}}}}, "y": {"docs": {"sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}}, "df": 9, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {"sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}}, "df": 6}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ALTER": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}}, "df": 2}}}}}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AlgorithmProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, 
"sqlglot.planner.Aggregate": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.AggFunc": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}}, "df": 3}}}}}}}, "y": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}}, "df": 6, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AnyValue": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.find_ancestor": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1.4142135623730951}}, "df": 5}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.ANTI": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.expressions.Condition.as_": {"tf": 1}}, "df": 1, "c": {"docs": {"sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}}, "df": 4}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.tokens.TokenType.ASOF": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "l": {"docs": 
{"sqlglot.expressions.ArrayAll": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ArrayAny": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ArrayOverlaps": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArrayConcat": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ArrayContains": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.ArrayContained": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.ArrayFilter": {"tf": 1}}, "df": 1}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.ArrayJoin": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ArraySize": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArraySort": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.ArraySum": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ArrayUnionAgg": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 6}}}, "g": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 2}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ApproxDistinct": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": 
{}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ApproxQuantile": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 11, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AutoIncrementProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.AutoName": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Abs": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, 
"df": 1}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}}, "df": 5}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.AMP": {"tf": 1}}, "df": 1}}}, "w": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}}, "df": 7}}, "n": {"docs": {"sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}}, "df": 13, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 1, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}}, "df": 2}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.WithDataProperty": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.WithJournalTableProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}}, "df": 9, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}}, "df": 10}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.text_width": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ErrorLevel.WARN": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Week": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.WeekOfYear": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.wrap": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, 
"sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}}, "df": 6}}}, "l": {"docs": {"sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FileFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}}, "df": 4}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 7}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Final": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}}, "df": 10, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FULL": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 4, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}}, "df": 6}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": 
{"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 4}}}}}, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Floor": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 13, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.FromBase64": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.FromBase": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.false": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}}, "df": 3}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": 
{"sqlglot.tokens.TokenType.FARROW": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 6, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}}, "df": 10}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": 
{"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.unsupported": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sep": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.seg": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pad_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.wrap": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.no_identify": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.normalize_func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cache_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.column_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.createable_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.create_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.clone_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.describe_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cte_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.directory_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.delete_sql": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.drop_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.except_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.except_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.filter_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.hint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.index_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.national_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.partition_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.root_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.property_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.insert_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.intersect_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.returning_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.table_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.update_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.var_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.into_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.from_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.group_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.having_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.join_sql": {"tf": 
1.4142135623730951}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.limit_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.offset_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.set_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lock_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.literal_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.null_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.order_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sort_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.schema_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.star_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.union_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.union_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.where_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.window_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.between_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.all_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.any_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.exists_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.case_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.extract_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.trim_sql": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.safeconcat_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.check_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.if_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.in_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.interval_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.return_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.reference_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.paren_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.neg_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.not_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.alias_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.add_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.and_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.connector_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cast_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.collate_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.command_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.comment_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.commit_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 
1.4142135623730951}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.div_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.distance_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dot_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.eq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.escape_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.glob_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.gt_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.gte_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.is_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.like_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lt_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lte_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mod_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mul_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.neq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.or_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.slice_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sub_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.use_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.binary": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.format_args": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.text_width": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.format_time": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.naked_property": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.set_operation": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tag_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.token_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.when_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.merge_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.cached_generator": {"tf": 1.4142135623730951}}, "df": 321}}, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 5, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}}, "df": 2}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 8}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.expressions.GT": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}}, 
"df": 3}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}}, "df": 15, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.optimizer.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 9}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}}, "df": 4, "b": {"docs": {"sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.JSONBContains": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONBExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONBExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.JSONKeyValue": 
{"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}}, "df": 2}}}}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONObject": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONFormat": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}}, "df": 12, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}}, "df": 4}}}}, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {"sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, 
"sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UnixToTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToTimeStr": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 2}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 14}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.UnsupportedError": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}}, "df": 3}}}}}, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Unhex": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, 
"d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Upper": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Use": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}}, "df": 3, "r": {"docs": {"sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}}, "df": 2}}}}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": 
{"tf": 1}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}}, "df": 2}}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {"sqlglot.expressions.Exp": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, 
"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 46, "s": {"docs": {"sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.DerivedTable": {"tf": 1}, "sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.UDTF": 
{"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Clone": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.expressions.Pragma": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.expressions.RawString": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.ColumnPosition": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.expressions.SetTag": {"tf": 1}, "sqlglot.expressions.Comment": {"tf": 1}, "sqlglot.expressions.MergeTreeTTLAction": {"tf": 1}, "sqlglot.expressions.MergeTreeTTL": {"tf": 1}, "sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1}, "sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.expressions.CompressColumnConstraint": {"tf": 1}, "sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.expressions.TitleColumnConstraint": {"tf": 1}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1}, "sqlglot.expressions.PathColumnConstraint": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.expressions.Check": {"tf": 1}, "sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, 
"sqlglot.expressions.OnConflict": {"tf": 1}, "sqlglot.expressions.Returning": {"tf": 1}, "sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.expressions.National": {"tf": 1}, "sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.expressions.Final": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.expressions.Property": {"tf": 1}, "sqlglot.expressions.AlgorithmProperty": {"tf": 1}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1}, "sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.expressions.CollateProperty": {"tf": 1}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.expressions.DefinerProperty": {"tf": 1}, "sqlglot.expressions.DistKeyProperty": {"tf": 1}, "sqlglot.expressions.DistStyleProperty": {"tf": 1}, "sqlglot.expressions.EngineProperty": {"tf": 1}, "sqlglot.expressions.ToTableProperty": {"tf": 1}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1}, "sqlglot.expressions.ExternalProperty": {"tf": 1}, "sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.expressions.FileFormatProperty": {"tf": 1}, "sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.expressions.InputOutputFormat": {"tf": 1}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.expressions.LanguageProperty": {"tf": 1}, "sqlglot.expressions.DictProperty": {"tf": 1}, "sqlglot.expressions.DictSubProperty": {"tf": 1}, "sqlglot.expressions.DictRange": {"tf": 1}, "sqlglot.expressions.OnCluster": {"tf": 1}, "sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.expressions.LocationProperty": {"tf": 1}, "sqlglot.expressions.LockingProperty": {"tf": 1}, "sqlglot.expressions.LogProperty": {"tf": 1}, "sqlglot.expressions.MaterializedProperty": {"tf": 1}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1}, "sqlglot.expressions.OnCommitProperty": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}, "sqlglot.expressions.RowFormatProperty": {"tf": 1}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1}, "sqlglot.expressions.SerdeProperties": {"tf": 1}, "sqlglot.expressions.SetProperty": {"tf": 1}, "sqlglot.expressions.SettingsProperty": {"tf": 1}, "sqlglot.expressions.SortKeyProperty": {"tf": 1}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1}, "sqlglot.expressions.StabilityProperty": {"tf": 1}, "sqlglot.expressions.TemporaryProperty": {"tf": 1}, 
"sqlglot.expressions.TransientProperty": {"tf": 1}, "sqlglot.expressions.VolatileProperty": {"tf": 1}, "sqlglot.expressions.WithDataProperty": {"tf": 1}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Return": {"tf": 1}, "sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.expressions.SessionParameter": {"tf": 1}, 
"sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataTypeSize": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.expressions.SubqueryPredicate": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.expressions.Command": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.expressions.Div": {"tf": 1}, "sqlglot.expressions.Overlaps": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.expressions.SafeDPipe": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.expressions.ArrayOverlaps": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}, "sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.Paren": 
{"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.AggFunc": {"tf": 1}, "sqlglot.expressions.ParameterizedAgg": {"tf": 1}, "sqlglot.expressions.Abs": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.expressions.Hll": {"tf": 1}, "sqlglot.expressions.ApproxDistinct": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.expressions.ToChar": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}, "sqlglot.expressions.ArrayAll": {"tf": 1}, "sqlglot.expressions.ArrayAny": {"tf": 1}, "sqlglot.expressions.ArrayConcat": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayContained": {"tf": 1}, "sqlglot.expressions.ArrayFilter": {"tf": 1}, "sqlglot.expressions.ArrayJoin": {"tf": 1}, "sqlglot.expressions.ArraySize": {"tf": 1}, "sqlglot.expressions.ArraySort": {"tf": 1}, "sqlglot.expressions.ArraySum": {"tf": 1}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}, "sqlglot.expressions.AnyValue": {"tf": 1}, "sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.CastToStrType": {"tf": 1}, "sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.expressions.Ceil": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.expressions.SafeConcat": {"tf": 1}, "sqlglot.expressions.ConcatWs": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.expressions.CountIf": {"tf": 1}, "sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.expressions.CurrentDatetime": {"tf": 1}, "sqlglot.expressions.CurrentTime": {"tf": 1}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1}, "sqlglot.expressions.CurrentUser": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DateTrunc": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.DayOfWeek": {"tf": 1}, "sqlglot.expressions.DayOfMonth": {"tf": 1}, "sqlglot.expressions.DayOfYear": {"tf": 1}, "sqlglot.expressions.WeekOfYear": {"tf": 1}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, 
"sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.DateFromParts": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}, "sqlglot.expressions.DateToDateStr": {"tf": 1}, "sqlglot.expressions.DateToDi": {"tf": 1}, "sqlglot.expressions.Date": {"tf": 1}, "sqlglot.expressions.Day": {"tf": 1}, "sqlglot.expressions.Decode": {"tf": 1}, "sqlglot.expressions.DiToDate": {"tf": 1}, "sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.expressions.Exp": {"tf": 1}, "sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.expressions.Floor": {"tf": 1}, "sqlglot.expressions.FromBase64": {"tf": 1}, "sqlglot.expressions.ToBase64": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}, "sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.expressions.Initcap": {"tf": 1}, "sqlglot.expressions.JSONKeyValue": {"tf": 1}, "sqlglot.expressions.JSONObject": {"tf": 1}, "sqlglot.expressions.OpenJSONColumnDef": {"tf": 1}, "sqlglot.expressions.OpenJSON": {"tf": 1}, "sqlglot.expressions.JSONBContains": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.JSONExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONBExtract": {"tf": 1}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONFormat": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}, "sqlglot.expressions.Left": {"tf": 1}, "sqlglot.expressions.Right": {"tf": 1}, "sqlglot.expressions.Length": {"tf": 1}, "sqlglot.expressions.Levenshtein": {"tf": 1}, "sqlglot.expressions.Ln": {"tf": 1}, "sqlglot.expressions.Log": {"tf": 1}, "sqlglot.expressions.Log2": {"tf": 1}, "sqlglot.expressions.Log10": {"tf": 1}, "sqlglot.expressions.LogicalOr": {"tf": 1}, "sqlglot.expressions.LogicalAnd": {"tf": 1}, "sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.expressions.StarMap": {"tf": 1}, "sqlglot.expressions.VarMap": {"tf": 1}, "sqlglot.expressions.MatchAgainst": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}, "sqlglot.expressions.MD5": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}, "sqlglot.expressions.Month": {"tf": 1}, "sqlglot.expressions.Nvl2": {"tf": 1}, "sqlglot.expressions.Posexplode": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}, "sqlglot.expressions.PercentileCont": {"tf": 1}, "sqlglot.expressions.PercentileDisc": {"tf": 1}, "sqlglot.expressions.Quantile": {"tf": 1}, "sqlglot.expressions.ApproxQuantile": {"tf": 1}, "sqlglot.expressions.RangeN": {"tf": 1}, "sqlglot.expressions.ReadCSV": {"tf": 1}, "sqlglot.expressions.Reduce": {"tf": 1}, "sqlglot.expressions.RegexpExtract": {"tf": 1}, "sqlglot.expressions.RegexpLike": {"tf": 1}, "sqlglot.expressions.RegexpILike": {"tf": 1}, "sqlglot.expressions.RegexpSplit": {"tf": 1}, "sqlglot.expressions.Repeat": {"tf": 1}, "sqlglot.expressions.Round": {"tf": 1}, "sqlglot.expressions.RowNumber": {"tf": 1}, "sqlglot.expressions.SafeDivide": {"tf": 1}, "sqlglot.expressions.SetAgg": {"tf": 1}, "sqlglot.expressions.SHA": {"tf": 1}, "sqlglot.expressions.SHA2": {"tf": 1}, "sqlglot.expressions.SortArray": {"tf": 1}, "sqlglot.expressions.Split": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}, "sqlglot.expressions.StandardHash": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}, 
"sqlglot.expressions.StrToDate": {"tf": 1}, "sqlglot.expressions.StrToTime": {"tf": 1}, "sqlglot.expressions.StrToUnix": {"tf": 1}, "sqlglot.expressions.NumberToStr": {"tf": 1}, "sqlglot.expressions.FromBase": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.expressions.StructExtract": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}, "sqlglot.expressions.Sqrt": {"tf": 1}, "sqlglot.expressions.Stddev": {"tf": 1}, "sqlglot.expressions.StddevPop": {"tf": 1}, "sqlglot.expressions.StddevSamp": {"tf": 1}, "sqlglot.expressions.TimeToStr": {"tf": 1}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1}, "sqlglot.expressions.TimeToUnix": {"tf": 1}, "sqlglot.expressions.TimeStrToDate": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1}, "sqlglot.expressions.Unhex": {"tf": 1}, "sqlglot.expressions.UnixToStr": {"tf": 1}, "sqlglot.expressions.UnixToTime": {"tf": 1}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1}, "sqlglot.expressions.Upper": {"tf": 1}, "sqlglot.expressions.Variance": {"tf": 1}, "sqlglot.expressions.VariancePop": {"tf": 1}, "sqlglot.expressions.Week": {"tf": 1}, "sqlglot.expressions.XMLTable": {"tf": 1}, "sqlglot.expressions.Year": {"tf": 1}, "sqlglot.expressions.Use": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.expressions.NextValueFor": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 658}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": 
{"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.expand": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}}, "df": 7}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ExternalProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ExecuteAsProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env": {"tf": 1}, "sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.executor.python": {"tf": 1}, "sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": 
{"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}}, "df": 59}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}}, "df": 3}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 9}}}}, "d": {"docs": {"sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.endswith": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {"sqlglot.executor.env": {"tf": 1}, "sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}}, "df": 10}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Encode": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.EngineProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.errors": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 18}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 5}}}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 3}}}, "q": {"docs": {"sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 5}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins": {"tf": 1}, 
"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 10}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.schema.Schema.empty": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.transforms.epoch_cast_to_ts": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}}, "df": 9}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Repeat": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}}, "df": 9}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reduce": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 3}}}}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Return": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Returning": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}}, "df": 4}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1}}, "df": 9}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}}, "df": 2}}}}}, "f": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 
1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}}, "df": 5}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.ReadCSV": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.RegexpExtract": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpLike": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpILike": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.RegexpSplit": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.ROWS": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, 
"d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.RowNumber": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}}, "df": 4}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}}, "df": 4}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Round": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}}, "df": 3, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.expressions.RangeN": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}, "w": {"docs": {"sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.RawString": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}}, "df": 2}}}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.expressions.Right": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}}, "df": 11}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}}, "df": 1}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}}, "df": 7, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage": {"tf": 1}, "sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 8, "h": {"docs": {}, "df": 0, "t": {"docs": 
{}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}}, "df": 2, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.LastDateOfMonth": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}}, "df": 4}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LanguageProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "w": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}}, "df": 9, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LocationProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "k": {"docs": {"sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LockingProperty": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}}, "df": 2}}}}}}, "g": {"1": {"0": {"docs": {"sqlglot.expressions.Log10": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"docs": {"sqlglot.expressions.Log2": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.expressions.Log": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LogProperty": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.LogicalOr": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.LogicalAnd": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Lower": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.expressions.Left": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}}, "df": 3}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Length": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Levenshtein": {"tf": 1}}, "df": 1}}}}}}}}}}, "t": {"docs": {"sqlglot.expressions.LT": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}}, "df": 3}}, "n": {"docs": {"sqlglot.expressions.Ln": {"tf": 1}}, "df": 1}, "r": {"docs": {"sqlglot.tokens.TokenType.LR_ARROW": 
{"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.hive": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1.4142135623730951}}, "df": 10}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}}, "df": 6}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 23}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Hll": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": 
{"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 9, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.MergeTreeTTL": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.MergeTreeTTLAction": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}}, "df": 3}, "p": {"docs": 
{"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}}, "df": 5, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 7}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.MatchAgainst": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.MaterializedProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "y": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}}, "df": 4, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, 
"sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}}, "df": 7}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}}, "df": 2}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Month": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.mysql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1.4142135623730951}}, "df": 7}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}}, "df": 2}}, "d": {"5": {"docs": {"sqlglot.expressions.MD5": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}}, "df": 7}}}}, "g": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}}, "df": 14}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": 
{"docs": {"sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}}, "df": 3, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "x": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}}, "df": 6}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BEGIN": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.bfs": {"tf": 1}}, "df": 1}}, "y": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, 
"sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}}, "df": 8, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}}, "df": 6}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}}, "df": 4}}}, "e": {"docs": {"sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BREAK": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, 
"sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}}, "df": 16, "s": {"docs": {"sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.National": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATURAL": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": 
{}, "df": 0, "r": {"docs": {"sqlglot.expressions.NumberToStr": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}}, "df": 10, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.optimizer.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}}, "df": 10, "d": {"docs": {"sqlglot.optimizer.normalize.normalized": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {"sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}}, "df": 6, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 4}}}}, "q": {"docs": {"sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}}, "df": 4}, "g": {"docs": {"sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}}, "df": 2}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}}, "df": 2, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.NextValueFor": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}}, "df": 2}}}}}}, "l": {"2": {"docs": {"sqlglot.expressions.Nvl2": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, 
"sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}}, "df": 2}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Variance": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VariancePop": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VarMap": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.VolatileProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.VIEW": {"tf": 1}}, "df": 1}}}}, "x": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.expressions.XMLTable": {"tf": 1}}, "df": 2}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}}, "df": 3}}, "y": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 4}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}}, "df": 2}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.optimizer.qualify": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, 
"sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 18}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Quantile": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.query_modifiers": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}}, "df": 2}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Year": {"tf": 1}}, "df": 1}}}}}}, "annotation": {"root": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.empty": {"tf": 1}}, "df": 25, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}}, "df": 9}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.alias": {"tf": 1}, 
"sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}}, "df": 11}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}}, "df": 2}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}}, "df": 3}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 1}}}}}}}}}}}, "default_value": {"root": {"1": {"docs": {"sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}}, "df": 1}, "2": {"docs": {"sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}}, "df": 1}, "3": {"docs": {"sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}}, "df": 1}, "4": {"docs": {"sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}}, "df": 1}, "5": {"docs": {"sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}}, "df": 1}, "6": {"docs": {"sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.schema": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 
1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 
1.4142135623730951}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CARET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BINARY": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BETWEEN": 
{"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CASE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DIV": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DROP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FROM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INNER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MOD": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLUP": 
{"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SOME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TOP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.THEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.4142135623730951}}, "df": 405, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.4142135623730951}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.FARROW": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.4142135623730951}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.FETCH": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.FILTER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FINAL": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.FIRST": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.4142135623730951}}, "df": 2, "e": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.FORMAT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.tokens.TokenType.FROM": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FULL": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": 
{"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, 
"sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, 
"sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 405, "e": {"docs": 
{"sqlglot.tokens.TokenType.LTE": {"tf": 1.4142135623730951}}, "df": 1}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}}, "df": 8}}}}}, "k": {"docs": {"sqlglot.tokens.TokenType.LOCK": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.4142135623730951}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.LOAD": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {"sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}}, "df": 1}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.LATERAL": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.LEFT": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1.4142135623730951}}, "df": 2}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.LIMIT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.4142135623730951}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, 
"sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 6}}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"2": {"docs": {"sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {"sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.4142135623730951}}, "df": 1}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SPACE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.STAR": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.4142135623730951}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1.4142135623730951}}, "df": 6}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.4142135623730951}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}}, "df": 1}}}, "t": {"docs": {"sqlglot.expressions.DataType.Type.SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SET": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.SETTINGS": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.SEMI": {"tf": 1.4142135623730951}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.SELECT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.4142135623730951}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.4142135623730951}}, "df": 2}}}, "b": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.SLASH": {"tf": 1.4142135623730951}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.SHOW": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SOME": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.MAP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.4142135623730951}}, "df": 2, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}}, "df": 1}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, 
"df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.4142135623730951}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MERGE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.4142135623730951}}, "df": 2}}}, "d": {"docs": {"sqlglot.tokens.TokenType.MOD": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.OR": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.ORDERED": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.OFFSET": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.tokens.TokenType.ON": {"tf": 1.4142135623730951}}, "df": 1}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OUTER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OVER": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1.4142135623730951}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "g": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.schema": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, 
"sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": 
{"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 
1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": 
{"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 405, "e": {"docs": {"sqlglot.tokens.TokenType.GTE": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.tokens.TokenType.GLOB": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}}, "df": 
1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}}, "df": 21}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "v": {"docs": {"sqlglot.tokens.TokenType.DIV": {"tf": 1.4142135623730951}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DATABASE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"6": {"4": {"docs": {"sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DASH": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.DARROW": {"tf": 1.4142135623730951}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.DAMP": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.DROP": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DELETE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.tokens.TokenType.DESC": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.DOT": {"tf": 1.4142135623730951}}, "df": 1}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.DCOLON": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": 
{"docs": {"sqlglot.tokens.TokenType.DPIPE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "x": {"2": {"7": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.DataType.Type.ENUM": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.DataType.Type.TIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CARET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CACHE": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.CASE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DIV": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DROP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FROM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INNER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.INTO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MOD": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROW": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SOME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TOP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.THEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.4142135623730951}}, "df": 398}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.XML": {"tf": 1.4142135623730951}}, "df": 2}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.BINARY": {"tf": 1.4142135623730951}}, "df": 2}}}}, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT": {"tf": 1.4142135623730951}}, "df": 3}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {"sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BREAK": {"tf": 1.4142135623730951}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}}, "df": 2}}}}, "y": {"docs": {"sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BEGIN": {"tf": 1.4142135623730951}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.CROSS": {"tf": 1.4142135623730951}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.4142135623730951}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, 
"r": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.COMMA": {"tf": 1.4142135623730951}}, "df": 1, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.COMMAND": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.COMMENT": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.COMMIT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLON": {"tf": 1.4142135623730951}}, "df": 1}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLUMN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.COLLATE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CARET": {"tf": 1.4142135623730951}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CACHE": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CASE": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CUBE": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1.4142135623730951}}, "df": 5}}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.HINT": {"tf": 1.4142135623730951}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 
1.4142135623730951}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.HASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}}, "df": 2}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.HAVING": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}}, "df": 7, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.PRAGMA": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}}, "df": 2}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.PARTITION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PLUS": {"tf": 
1.4142135623730951}}, "df": 1}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PIPE": {"tf": 1.4142135623730951}}, "df": 1}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PIVOT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PERCENT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.REPLACE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.RETURNING": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.4142135623730951}}, "df": 1}}}, "w": {"docs": {"sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1.4142135623730951}}, "df": 1}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.RANGE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.ROW": {"tf": 1.4142135623730951}}, "df": 1, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.ROWS": {"tf": 1.4142135623730951}}, "df": 1}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.4142135623730951}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.RIGHT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.RLIKE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}}, "df": 3, "a": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.4142135623730951}}, "df": 2}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TRUE": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}, "t": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}}, "df": 78}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.4142135623730951}}, "df": 2}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.TILDA": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, 
"sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.UINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT128": {"tf": 1}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1}, "sqlglot.tokens.TokenType.INT256": {"tf": 1}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": 
{"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.INET": {"tf": 1}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, 
"sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, 
"sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 288}}}}}}}, "p": {"docs": {"sqlglot.tokens.TokenType.TOP": {"tf": 1.4142135623730951}}, "df": 1}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.THEN": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 4}}}}}}}}}, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": 
{"docs": {"sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.EXISTS": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.DataType.Type.ENUM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1.4142135623730951}}, "df": 2}}, "d": {"docs": {"sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.END": {"tf": 1.4142135623730951}}, "df": 2}}, "q": {"docs": {"sqlglot.tokens.TokenType.EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}}, "df": 2}, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ELSE": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "n": {"docs": {"sqlglot.tokens.TokenType.IN": {"tf": 1.4142135623730951}}, "df": 1, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.4142135623730951}}, "df": 2}}}, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.INT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT128": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.INT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT256": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, 
"df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}, "8": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}, "docs": {"sqlglot.expressions.DataType.Type.INT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "o": {"docs": {"sqlglot.tokens.TokenType.INTO": {"tf": 1.4142135623730951}}, "df": 1}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.INET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INET": {"tf": 1.4142135623730951}}, "df": 2}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INNER": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.INSERT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.IF": {"tf": 1.4142135623730951}}, "df": 1}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ILIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1.4142135623730951}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.IS": {"tf": 
1.4142135623730951}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.ISNULL": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ErrorLevel.WARN": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.4142135623730951}}, "df": 2}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.WINDOW": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.WHEN": {"tf": 1.4142135623730951}}, "df": 1}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.WHERE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1.4142135623730951}}, "df": 1}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATURAL": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.NUMBER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.NULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.4142135623730951}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.4142135623730951}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 
0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NOT": {"tf": 1.4142135623730951}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.tokens.TokenType.NEQ": {"tf": 1.4142135623730951}}, "df": 1}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.4142135623730951}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.4142135623730951}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ALTER": {"tf": 1.4142135623730951}}, "df": 1}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.ALL": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.4142135623730951}}, "df": 2}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}}, "df": 4}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.AND": {"tf": 1.4142135623730951}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.ANTI": {"tf": 1.4142135623730951}}, "df": 1}}, "y": {"docs": {"sqlglot.tokens.TokenType.ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1.4142135623730951}}, "df": 3}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.AMP": {"tf": 1.4142135623730951}}, "df": 1}}, "t": {"docs": {"sqlglot.tokens.TokenType.LT_AT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1.4142135623730951}}, "df": 2}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.APPLY": {"tf": 1.4142135623730951}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.tokens.TokenType.ASC": {"tf": 1.4142135623730951}}, "df": 1}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.tokens.TokenType.ASOF": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.4142135623730951}}, "df": 2}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNNEST": {"tf": 1.4142135623730951}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"1": {"2": {"8": {"docs": {"sqlglot.expressions.DataType.Type.UINT128": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"6": {"docs": {"sqlglot.expressions.DataType.Type.UINT256": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1.4142135623730951}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot.expressions.DataType.Type.UINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UINT": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "e": {"docs": {"sqlglot.tokens.TokenType.USE": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {"sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1.4142135623730951}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.USING": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.Type.UUID": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.4142135623730951}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UPDATE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.JSON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.4142135623730951}}, "df": 2, "b": {"docs": {"sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.4142135623730951}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.JOIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1.4142135623730951}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.VAR": {"tf": 1.4142135623730951}}, "df": 1, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.4142135623730951}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.tokens.TokenType.VALUES": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.VIEW": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}}, "df": 2}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.KEEP": {"tf": 1.4142135623730951}}, "df": 1}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.QUOTE": {"tf": 
1.4142135623730951}}, "df": 1}}}}}}}, "signature": {"root": {"0": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.tokens.Tokenizer.peek": {"tf": 1}}, "df": 5, "x": {"7": {"docs": {}, "df": 0, "f": {"5": {"docs": {}, "df": 0, "e": {"6": {"1": {"3": {"docs": {}, "df": 0, "d": {"0": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "f": {"7": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "1": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "f": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "2": {"0": {"5": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"1": {"3": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "c": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "7": {"docs": {}, "df": 0, "f": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "docs": {}, "df": 0}}, "4": {"0": {"2": {"docs": {}, "df": 0, "b": {"0": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "3": {"0": {"docs": {}, "df": 0, "a": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "1": {"docs": {}, "df": 0, "c": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "2": {"docs": {}, "df": 0, "e": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "4": {"9": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"3": {"5": {"2": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}}, "1": {"0": {"0": {"docs": {"sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"8": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "4": {"0": {"0": {"4": {"3": {"3": {"0": {"9": {"0": {"8": {"0": {"9": {"9": {"2": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"2": {"1": {"2": {"4": {"8": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"0": {"5": {"1": {"2": {"docs": {"sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": 
{}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"5": {"3": {"7": {"4": {"4": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"6": {"3": {"9": {"6": {"8": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"9": {"0": {"4": {"0": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"4": {"6": {"8": {"9": {"6": {"docs": {"sqlglot.dataframe.sql.Column.over": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"4": {"0": {"4": {"3": {"2": {"docs": {"sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"7": {"9": {"9": {"2": {"0": {"docs": {"sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"5": {"6": {"2": {"0": {"8": {"docs": {"sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"0": {"0": {"5": {"3": {"3": {"6": {"0": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"8": {"0": {"0": {"0": {"0": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"0": {"7": {"7": {"1": {"2": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"0": {"3": {"9": {"2": {"0": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"9": {"5": {"6": {"3": {"2": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"3": {"5": {"3": {"1": {"2": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"0": {"0": {"7": {"7": {"2": {"8": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"1": {"7": {"2": {"8": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"3": {"7": 
{"7": {"4": {"4": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"9": {"8": {"6": {"0": {"8": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"8": {"2": {"6": {"8": {"8": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"8": {"0": {"5": {"7": {"7": {"6": {"docs": {"sqlglot.dataframe.sql.Column.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"7": {"8": {"6": {"2": {"2": {"4": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"7": {"2": {"0": {"8": {"0": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 3}, "2": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "3": {"9": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 2}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isin": {"tf": 2}, "sqlglot.dataframe.sql.Column.between": {"tf": 2}, "sqlglot.dataframe.sql.Column.over": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 2}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 2}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 2}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 2}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sep": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.seg": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.national_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties": {"tf": 2.449489742783178}, "sqlglot.generator.Generator.table_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 2}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.func": {"tf": 2}, "sqlglot.generator.Generator.expressions": {"tf": 2}, "sqlglot.helper.csv": {"tf": 1.4142135623730951}, "sqlglot.lineage.Node.__init__": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 43}, "docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 2}, "6": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "8": {"0": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot.parse": {"tf": 10.099504938362077}, "sqlglot.parse_one": {"tf": 12.727922061357855}, "sqlglot.transpile": {"tf": 14}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 6}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 12.806248474865697}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 6}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 14.2828568570857}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 6.4031242374328485}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 5.830951894845301}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 6.324555320336759}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 6.48074069840786}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 8.426149773176359}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 8.426149773176359}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 6.324555320336759}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 6.324555320336759}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 12.041594578792296}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 9.9498743710662}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 9.9498743710662}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 7.14142842854285}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 5.656854249492381}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 10.677078252031311}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 10}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 13.19090595827292}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 7.937253933193772}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 5.0990195135927845}, 
"sqlglot.dataframe.sql.DataFrame.drop": {"tf": 8.12403840463596}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 6}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 8}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 9.327379053088816}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 6}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 6.782329983125268}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 9.16515138991168}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 8.602325267042627}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 7.0710678118654755}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 7.54983443527075}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 8.831760866327848}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 9.797958971132712}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 8.54400374531753}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 8.246211251235321}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 8.246211251235321}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 6.48074069840786}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 5.656854249492381}, "sqlglot.dataframe.sql.Column.copy": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.Column.sql": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.Column.alias": {"tf": 6}, "sqlglot.dataframe.sql.Column.asc": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.desc": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.when": {"tf": 7.937253933193772}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 6}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.cast": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 8}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 8}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 6}, "sqlglot.dataframe.sql.Column.like": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.Column.substr": {"tf": 9.9498743710662}, "sqlglot.dataframe.sql.Column.isin": {"tf": 7.937253933193772}, "sqlglot.dataframe.sql.Column.between": {"tf": 8.660254037844387}, "sqlglot.dataframe.sql.Column.over": {"tf": 7.211102550927978}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 10.677078252031311}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 10.908712114635714}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": 
{"tf": 12.083045973594572}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 9}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 9}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 5.744562646538029}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 9}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 9}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 6}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 10}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 5.830951894845301}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 4.795831523312719}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 6.557438524302}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 7.615773105863909}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 7.745966692414834}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 5.744562646538029}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 8.48528137423857}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 9.273618495495704}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 7.615773105863909}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 10.099504938362077}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 5.5677643628300215}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 5.744562646538029}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 5.0990195135927845}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 5.0990195135927845}, "sqlglot.dialects.dialect.rename_func": {"tf": 6.928203230275509}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.if_sql": {"tf": 6.48074069840786}, 
"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 7.416198487095663}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 7.416198487095663}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 8.48528137423857}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 8.660254037844387}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 8}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 6.164414002968976}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 6.164414002968976}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 4.898979485566356}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.min_or_least": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.trim_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 4}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 7.416198487095663}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 9.539392014169456}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 4.47213595499958}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 8}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 5.291502622129181}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 5.744562646538029}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 5.291502622129181}, 
"sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 9.16515138991168}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 5.291502622129181}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 5.477225575051661}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 6.6332495807108}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 8.48528137423857}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 7.416198487095663}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 5.291502622129181}, "sqlglot.diff.Insert.__init__": {"tf": 4.47213595499958}, "sqlglot.diff.Remove.__init__": {"tf": 4.47213595499958}, "sqlglot.diff.Move.__init__": {"tf": 4.47213595499958}, "sqlglot.diff.Update.__init__": {"tf": 6.164414002968976}, "sqlglot.diff.Keep.__init__": {"tf": 6.164414002968976}, "sqlglot.diff.diff": {"tf": 12.96148139681572}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 5.656854249492381}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 12.727922061357855}, "sqlglot.errors.ParseError.__init__": {"tf": 6.48074069840786}, "sqlglot.errors.ParseError.new": {"tf": 13.038404810405298}, "sqlglot.errors.concat_messages": {"tf": 5.385164807134504}, "sqlglot.errors.merge_errors": {"tf": 6.48074069840786}, "sqlglot.executor.execute": {"tf": 12.727922061357855}, "sqlglot.executor.context.Context.__init__": {"tf": 7.416198487095663}, 
"sqlglot.executor.context.Context.eval": {"tf": 3.7416573867739413}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 3.7416573867739413}, "sqlglot.executor.context.Context.add_columns": {"tf": 4.69041575982343}, "sqlglot.executor.context.Context.table_iter": {"tf": 7.681145747868608}, "sqlglot.executor.context.Context.filter": {"tf": 4}, "sqlglot.executor.context.Context.sort": {"tf": 4}, "sqlglot.executor.context.Context.set_row": {"tf": 4.47213595499958}, "sqlglot.executor.context.Context.set_index": {"tf": 4.47213595499958}, "sqlglot.executor.context.Context.set_range": {"tf": 5.291502622129181}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 2.8284271247461903}, "sqlglot.executor.env.filter_nulls": {"tf": 4.242640687119285}, "sqlglot.executor.env.null_if_any": {"tf": 3.4641016151377544}, "sqlglot.executor.env.str_position": {"tf": 4.69041575982343}, "sqlglot.executor.env.substring": {"tf": 5.0990195135927845}, "sqlglot.executor.env.cast": {"tf": 3.7416573867739413}, "sqlglot.executor.env.ordered": {"tf": 4.242640687119285}, "sqlglot.executor.env.interval": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 4.47213595499958}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 3.1622776601683795}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 4.795831523312719}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 4.69041575982343}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 4.242640687119285}, "sqlglot.executor.table.Table.__init__": {"tf": 4.898979485566356}, "sqlglot.executor.table.Table.add_columns": {"tf": 4.69041575982343}, "sqlglot.executor.table.Table.append": {"tf": 3.7416573867739413}, "sqlglot.executor.table.Table.pop": {"tf": 3.1622776601683795}, "sqlglot.executor.table.TableIter.__init__": {"tf": 2.8284271247461903}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 2.8284271247461903}, "sqlglot.executor.table.RowReader.__init__": {"tf": 4}, "sqlglot.executor.table.ensure_tables": {"tf": 5.744562646538029}, "sqlglot.expressions.Expression.__init__": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.text": {"tf": 4}, "sqlglot.expressions.Expression.copy": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.add_comments": {"tf": 5.291502622129181}, "sqlglot.expressions.Expression.append": {"tf": 5.291502622129181}, "sqlglot.expressions.Expression.set": {"tf": 5.291502622129181}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 5.744562646538029}, "sqlglot.expressions.Expression.find": {"tf": 7.0710678118654755}, "sqlglot.expressions.Expression.find_all": {"tf": 7.0710678118654755}, 
"sqlglot.expressions.Expression.find_ancestor": {"tf": 6}, "sqlglot.expressions.Expression.root": {"tf": 4.47213595499958}, "sqlglot.expressions.Expression.walk": {"tf": 5.0990195135927845}, "sqlglot.expressions.Expression.dfs": {"tf": 5.830951894845301}, "sqlglot.expressions.Expression.bfs": {"tf": 4.242640687119285}, "sqlglot.expressions.Expression.unnest": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.unalias": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.flatten": {"tf": 4.242640687119285}, "sqlglot.expressions.Expression.sql": {"tf": 9.055385138137417}, "sqlglot.expressions.Expression.transform": {"tf": 5.830951894845301}, "sqlglot.expressions.Expression.replace": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.pop": {"tf": 4.47213595499958}, "sqlglot.expressions.Expression.assert_is": {"tf": 5.477225575051661}, "sqlglot.expressions.Expression.error_messages": {"tf": 6}, "sqlglot.expressions.Expression.dump": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.load": {"tf": 3.7416573867739413}, "sqlglot.expressions.Condition.and_": {"tf": 11.789826122551595}, "sqlglot.expressions.Condition.or_": {"tf": 11.789826122551595}, "sqlglot.expressions.Condition.not_": {"tf": 4.898979485566356}, "sqlglot.expressions.Condition.as_": {"tf": 12.12435565298214}, "sqlglot.expressions.Condition.isin": {"tf": 9.591663046625438}, "sqlglot.expressions.Condition.between": {"tf": 7.810249675906654}, "sqlglot.expressions.Condition.is_": {"tf": 6.928203230275509}, "sqlglot.expressions.Condition.like": {"tf": 6.928203230275509}, "sqlglot.expressions.Condition.ilike": {"tf": 6.928203230275509}, "sqlglot.expressions.Condition.eq": {"tf": 5.291502622129181}, "sqlglot.expressions.Condition.neq": {"tf": 5.291502622129181}, "sqlglot.expressions.Condition.rlike": {"tf": 6.928203230275509}, "sqlglot.expressions.Unionable.union": {"tf": 11.489125293076057}, "sqlglot.expressions.Unionable.intersect": {"tf": 11.489125293076057}, "sqlglot.expressions.Unionable.except_": {"tf": 11.489125293076057}, "sqlglot.expressions.Column.to_dot": {"tf": 4.47213595499958}, "sqlglot.expressions.Delete.delete": {"tf": 11.489125293076057}, "sqlglot.expressions.Delete.where": {"tf": 12.409673645990857}, "sqlglot.expressions.Delete.returning": {"tf": 11.489125293076057}, "sqlglot.expressions.Insert.with_": {"tf": 13.96424004376894}, "sqlglot.expressions.Literal.number": {"tf": 4.898979485566356}, "sqlglot.expressions.Literal.string": {"tf": 4.898979485566356}, "sqlglot.expressions.Join.on": {"tf": 12.409673645990857}, "sqlglot.expressions.Join.using": {"tf": 12.409673645990857}, "sqlglot.expressions.Properties.from_dict": {"tf": 5.291502622129181}, "sqlglot.expressions.Tuple.isin": {"tf": 9.591663046625438}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 8.602325267042627}, "sqlglot.expressions.Subqueryable.limit": {"tf": 11.704699910719626}, "sqlglot.expressions.Subqueryable.with_": {"tf": 13.96424004376894}, "sqlglot.expressions.Union.limit": {"tf": 11.704699910719626}, "sqlglot.expressions.Union.select": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.from_": {"tf": 11.489125293076057}, "sqlglot.expressions.Select.group_by": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.order_by": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.sort_by": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.cluster_by": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.limit": {"tf": 
11.704699910719626}, "sqlglot.expressions.Select.offset": {"tf": 11.704699910719626}, "sqlglot.expressions.Select.select": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.lateral": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.join": {"tf": 17.46424919657298}, "sqlglot.expressions.Select.where": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.having": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.window": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.qualify": {"tf": 12.409673645990857}, "sqlglot.expressions.Select.distinct": {"tf": 9.219544457292887}, "sqlglot.expressions.Select.ctas": {"tf": 12.328828005937952}, "sqlglot.expressions.Select.lock": {"tf": 7.14142842854285}, "sqlglot.expressions.Select.hint": {"tf": 11.269427669584644}, "sqlglot.expressions.Subquery.unnest": {"tf": 3.1622776601683795}, "sqlglot.expressions.DataType.build": {"tf": 11.357816691600547}, "sqlglot.expressions.DataType.is_type": {"tf": 7.3484692283495345}, "sqlglot.expressions.Dot.build": {"tf": 6.557438524302}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func.from_arg_list": {"tf": 3.7416573867739413}, "sqlglot.expressions.Func.sql_names": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func.sql_name": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 3.1622776601683795}, "sqlglot.expressions.Case.when": {"tf": 9.848857801796104}, "sqlglot.expressions.Case.else_": {"tf": 8.366600265340756}, "sqlglot.expressions.Cast.is_type": {"tf": 7.3484692283495345}, "sqlglot.expressions.maybe_parse": {"tf": 14.866068747318506}, "sqlglot.expressions.union": {"tf": 12.409673645990857}, "sqlglot.expressions.intersect": {"tf": 12.409673645990857}, "sqlglot.expressions.except_": {"tf": 12.409673645990857}, "sqlglot.expressions.select": {"tf": 10.677078252031311}, "sqlglot.expressions.from_": {"tf": 10.583005244258363}, "sqlglot.expressions.update": {"tf": 13.892443989449804}, "sqlglot.expressions.delete": {"tf": 13.711309200802088}, "sqlglot.expressions.insert": {"tf": 14.594519519326424}, "sqlglot.expressions.condition": {"tf": 11.269427669584644}, "sqlglot.expressions.and_": {"tf": 11.575836902790225}, "sqlglot.expressions.or_": {"tf": 11.575836902790225}, "sqlglot.expressions.not_": {"tf": 11.269427669584644}, "sqlglot.expressions.paren": {"tf": 7.615773105863909}, "sqlglot.expressions.to_identifier": {"tf": 5.0990195135927845}, "sqlglot.expressions.to_interval": {"tf": 6.164414002968976}, "sqlglot.expressions.to_table": {"tf": 11.045361017187261}, "sqlglot.expressions.to_column": {"tf": 6.708203932499369}, "sqlglot.expressions.alias_": {"tf": 14.142135623730951}, "sqlglot.expressions.subquery": {"tf": 12.24744871391589}, "sqlglot.expressions.column": {"tf": 13.114877048604}, "sqlglot.expressions.cast": {"tf": 9.433981132056603}, "sqlglot.expressions.table_": {"tf": 13.114877048604}, "sqlglot.expressions.values": {"tf": 10.44030650891055}, "sqlglot.expressions.var": {"tf": 6.928203230275509}, "sqlglot.expressions.rename_table": {"tf": 7.745966692414834}, "sqlglot.expressions.convert": {"tf": 6.164414002968976}, "sqlglot.expressions.replace_children": {"tf": 6.928203230275509}, "sqlglot.expressions.column_table_names": {"tf": 5.385164807134504}, "sqlglot.expressions.table_name": {"tf": 5.385164807134504}, "sqlglot.expressions.replace_tables": {"tf": 7.211102550927978}, "sqlglot.expressions.replace_placeholders": {"tf": 6.855654600401044}, "sqlglot.expressions.expand": {"tf": 8.660254037844387}, 
"sqlglot.expressions.func": {"tf": 10.04987562112089}, "sqlglot.expressions.true": {"tf": 4.123105625617661}, "sqlglot.expressions.false": {"tf": 4.123105625617661}, "sqlglot.expressions.null": {"tf": 4.123105625617661}, "sqlglot.generator.Generator.__init__": {"tf": 14.7648230602334}, "sqlglot.generator.Generator.generate": {"tf": 7.937253933193772}, "sqlglot.generator.Generator.unsupported": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.sep": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.seg": {"tf": 6}, "sqlglot.generator.Generator.pad_comment": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.maybe_comment": {"tf": 8.54400374531753}, "sqlglot.generator.Generator.wrap": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.no_identify": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.normalize_func": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.indent": {"tf": 9.327379053088816}, "sqlglot.generator.Generator.sql": {"tf": 8.94427190999916}, "sqlglot.generator.Generator.uncache_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.cache_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.characterset_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.column_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.columndef_sql": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 4.242640687119285}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 5.477225575051661}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.createable_sql": {"tf": 8.48528137423857}, "sqlglot.generator.Generator.create_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.clone_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.describe_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 6}, "sqlglot.generator.Generator.with_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.cte_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.datatype_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.directory_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.delete_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.drop_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.except_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.except_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.fetch_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.filter_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.hint_sql": {"tf": 
5.291502622129181}, "sqlglot.generator.Generator.index_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.identifier_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.national_sql": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.partition_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.properties_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.root_properties": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.properties": {"tf": 9.797958971132712}, "sqlglot.generator.Generator.with_properties": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.locate_properties": {"tf": 7.937253933193772}, "sqlglot.generator.Generator.property_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.insert_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.intersect_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.intersect_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.introducer_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.returning_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.table_sql": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 7.615773105863909}, "sqlglot.generator.Generator.pivot_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.tuple_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.update_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.values_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.var_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.into_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.from_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.group_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.having_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.join_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lambda_sql": {"tf": 6.855654600401044}, "sqlglot.generator.Generator.lateral_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.limit_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.offset_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.setitem_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.set_sql": {"tf": 5.291502622129181}, 
"sqlglot.generator.Generator.pragma_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lock_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.literal_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.null_sql": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.boolean_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.order_sql": {"tf": 6.48074069840786}, "sqlglot.generator.Generator.cluster_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.distribute_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.sort_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ordered_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.query_modifiers": {"tf": 6.164414002968976}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 9.16515138991168}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.select_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.schema_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.star_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.parameter_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.subquery_sql": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.qualify_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.union_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.union_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.unnest_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.where_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.window_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 6.557438524302}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.between_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bracket_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.all_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.any_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.exists_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.case_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.constraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.extract_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.trim_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.check_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.if_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 5.291502622129181}, 
"sqlglot.generator.Generator.jsonobject_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.openjson_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.in_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.interval_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.return_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.reference_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.paren_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.neg_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.not_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.alias_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.aliases_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.add_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.and_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.connector_sql": {"tf": 6}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.cast_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.collate_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.command_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.comment_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.transaction_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.commit_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.rollback_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.renametable_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.altertable_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.distinct_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.div_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.distance_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dot_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.eq_sql": {"tf": 5.291502622129181}, 
"sqlglot.generator.Generator.escape_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.glob_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.gt_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.gte_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ilike_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.is_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.like_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.likeany_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.similarto_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lt_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lte_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mod_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mul_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.neq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.or_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.slice_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.sub_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.trycast_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.use_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.binary": {"tf": 6}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.func": {"tf": 9.486832980505138}, "sqlglot.generator.Generator.format_args": {"tf": 6.708203932499369}, "sqlglot.generator.Generator.text_width": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.format_time": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.expressions": {"tf": 12.165525060596439}, "sqlglot.generator.Generator.op_expressions": {"tf": 7.3484692283495345}, "sqlglot.generator.Generator.naked_property": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.set_operation": {"tf": 6}, "sqlglot.generator.Generator.tag_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.token_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.when_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.merge_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.tochar_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 5.291502622129181}, "sqlglot.generator.cached_generator": {"tf": 7.3484692283495345}, "sqlglot.helper.seq_get": {"tf": 6.164414002968976}, "sqlglot.helper.ensure_list": {"tf": 3.1622776601683795}, "sqlglot.helper.ensure_collection": {"tf": 3.1622776601683795}, "sqlglot.helper.csv": {"tf": 5.830951894845301}, "sqlglot.helper.subclasses": {"tf": 9.38083151964686}, "sqlglot.helper.apply_index_offset": {"tf": 7.937253933193772}, "sqlglot.helper.camel_to_snake_case": {"tf": 4}, "sqlglot.helper.while_changing": {"tf": 7.483314773547883}, 
"sqlglot.helper.tsort": {"tf": 6.324555320336759}, "sqlglot.helper.open_file": {"tf": 3.872983346207417}, "sqlglot.helper.csv_reader": {"tf": 4.898979485566356}, "sqlglot.helper.find_new_name": {"tf": 5.385164807134504}, "sqlglot.helper.name_sequence": {"tf": 4.795831523312719}, "sqlglot.helper.object_to_dict": {"tf": 4.69041575982343}, "sqlglot.helper.split_num_words": {"tf": 7.615773105863909}, "sqlglot.helper.is_iterable": {"tf": 4}, "sqlglot.helper.flatten": {"tf": 6.082762530298219}, "sqlglot.helper.dict_depth": {"tf": 4}, "sqlglot.helper.first": {"tf": 5}, "sqlglot.helper.case_sensitive": {"tf": 8.54400374531753}, "sqlglot.helper.should_identify": {"tf": 9.643650760992955}, "sqlglot.lineage.Node.__init__": {"tf": 9.899494936611665}, "sqlglot.lineage.Node.walk": {"tf": 5}, "sqlglot.lineage.Node.to_html": {"tf": 5.0990195135927845}, "sqlglot.lineage.lineage": {"tf": 14.422205101855956}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 10.44030650891055}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 13.2664991614216}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 11.74734012447073}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 4.898979485566356}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 5.744562646538029}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 5.656854249492381}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 5.656854249492381}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 5.744562646538029}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 5.744562646538029}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3.1622776601683795}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 4.242640687119285}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 4.242640687119285}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 4.242640687119285}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 4.242640687119285}, "sqlglot.optimizer.normalize.normalize": {"tf": 7.280109889280518}, "sqlglot.optimizer.normalize.normalized": {"tf": 4.242640687119285}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 4.242640687119285}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 4.69041575982343}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 9.1104335791443}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 3.1622776601683795}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 3.1622776601683795}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 3.1622776601683795}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 3.7416573867739413}, "sqlglot.optimizer.optimizer.optimize": {"tf": 21.071307505705477}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 3.1622776601683795}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 3.7416573867739413}, 
"sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 5.0990195135927845}, "sqlglot.optimizer.qualify.qualify": {"tf": 16.822603841260722}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 9.746794344808963}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 3.1622776601683795}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 9.899494936611665}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 5.0990195135927845}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 5.744562646538029}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 4.69041575982343}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 9.433981132056603}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 8.06225774829855}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 5.656854249492381}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.find": {"tf": 4.898979485566356}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 4.898979485566356}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 3.7416573867739413}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 3.7416573867739413}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 6.48074069840786}, "sqlglot.optimizer.scope.build_scope": {"tf": 6.48074069840786}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.simplify": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 5.744562646538029}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.flatten": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 4.69041575982343}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.always_true": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.is_complement": {"tf": 3.7416573867739413}, "sqlglot.optimizer.simplify.is_false": {"tf": 4.898979485566356}, "sqlglot.optimizer.simplify.is_null": {"tf": 4.898979485566356}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.extract_date": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.date_literal": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 3.1622776601683795}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 3.1622776601683795}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 4.242640687119285}, 
"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 4.69041575982343}, "sqlglot.parser.parse_var_map": {"tf": 6.082762530298219}, "sqlglot.parser.parse_like": {"tf": 6.082762530298219}, "sqlglot.parser.binary_range_parser": {"tf": 8.660254037844387}, "sqlglot.parser.Parser.__init__": {"tf": 7.874007874011811}, "sqlglot.parser.Parser.reset": {"tf": 3.1622776601683795}, "sqlglot.parser.Parser.parse": {"tf": 8.426149773176359}, "sqlglot.parser.Parser.parse_into": {"tf": 11.313708498984761}, "sqlglot.parser.Parser.check_errors": {"tf": 3.4641016151377544}, "sqlglot.parser.Parser.raise_error": {"tf": 6.855654600401044}, "sqlglot.parser.Parser.expression": {"tf": 7.810249675906654}, "sqlglot.parser.Parser.validate_expression": {"tf": 6.557438524302}, "sqlglot.planner.Plan.__init__": {"tf": 4.47213595499958}, "sqlglot.planner.Step.from_expression": {"tf": 8.602325267042627}, "sqlglot.planner.Step.add_dependency": {"tf": 5.291502622129181}, "sqlglot.planner.Step.to_s": {"tf": 5.0990195135927845}, "sqlglot.planner.Scan.from_expression": {"tf": 8.602325267042627}, "sqlglot.planner.Join.from_joins": {"tf": 8.888194417315589}, "sqlglot.planner.SetOperation.__init__": {"tf": 8.306623862918075}, "sqlglot.planner.SetOperation.from_expression": {"tf": 8.602325267042627}, "sqlglot.schema.Schema.add_table": {"tf": 12.36931687685298}, "sqlglot.schema.Schema.column_names": {"tf": 10.816653826391969}, "sqlglot.schema.Schema.get_column_type": {"tf": 11.045361017187261}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 4.795831523312719}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 5.744562646538029}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 8.48528137423857}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 7.810249675906654}, "sqlglot.schema.MappingSchema.__init__": {"tf": 11}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 6.164414002968976}, "sqlglot.schema.MappingSchema.copy": {"tf": 5.0990195135927845}, "sqlglot.schema.MappingSchema.add_table": {"tf": 12.36931687685298}, "sqlglot.schema.MappingSchema.column_names": {"tf": 10.816653826391969}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 11.045361017187261}, "sqlglot.schema.ensure_schema": {"tf": 7.681145747868608}, "sqlglot.schema.ensure_column_mapping": {"tf": 7.615773105863909}, "sqlglot.schema.flatten_schema": {"tf": 7.54983443527075}, "sqlglot.schema.nested_get": {"tf": 7.615773105863909}, "sqlglot.schema.nested_set": {"tf": 6.082762530298219}, "sqlglot.serde.dump": {"tf": 11.489125293076057}, "sqlglot.serde.load": {"tf": 11.489125293076057}, "sqlglot.time.format_time": {"tf": 7.810249675906654}, "sqlglot.tokens.Token.__init__": {"tf": 10.535653752852738}, "sqlglot.tokens.Token.number": {"tf": 5.291502622129181}, "sqlglot.tokens.Token.string": {"tf": 5.291502622129181}, "sqlglot.tokens.Token.identifier": {"tf": 5.291502622129181}, "sqlglot.tokens.Token.var": {"tf": 5.291502622129181}, "sqlglot.tokens.Tokenizer.reset": {"tf": 3.4641016151377544}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 5.744562646538029}, "sqlglot.tokens.Tokenizer.peek": {"tf": 5.0990195135927845}, "sqlglot.transforms.unalias_group": {"tf": 5.744562646538029}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 5.744562646538029}, "sqlglot.transforms.eliminate_qualify": {"tf": 5.744562646538029}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 5.744562646538029}, "sqlglot.transforms.unnest_to_explode": {"tf": 5.744562646538029}, "sqlglot.transforms.explode_to_unnest": {"tf": 5.744562646538029}, 
"sqlglot.transforms.remove_target_from_merge": {"tf": 5.744562646538029}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 5.744562646538029}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 5.744562646538029}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 5.744562646538029}, "sqlglot.transforms.preprocess": {"tf": 8.774964387392123}, "sqlglot.trie.new_trie": {"tf": 6.708203932499369}, "sqlglot.trie.in_trie": {"tf": 6.244997998398398}}, "df": 795, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 108, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 1.7320508075688772}, "sqlglot.parse_one": {"tf": 2.23606797749979}, "sqlglot.transpile": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 
1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.startswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.if_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, 
"sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 3}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 3}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor.execute": {"tf": 2.23606797749979}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Condition.or_": {"tf": 2}, "sqlglot.expressions.Condition.as_": {"tf": 2}, "sqlglot.expressions.Condition.isin": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.like": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.ilike": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 2}, "sqlglot.expressions.Delete.where": {"tf": 2}, "sqlglot.expressions.Delete.returning": {"tf": 2}, "sqlglot.expressions.Insert.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 2}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.limit": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Union.select": {"tf": 2}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2}, "sqlglot.expressions.Select.lateral": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.where": {"tf": 2}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Select.window": {"tf": 2}, "sqlglot.expressions.Select.qualify": {"tf": 2}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 2}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 2}, "sqlglot.expressions.DataType.build": {"tf": 2.23606797749979}, "sqlglot.expressions.DataType.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.Dot.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.Case.when": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case.else_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2.449489742783178}, "sqlglot.expressions.delete": {"tf": 2.449489742783178}, "sqlglot.expressions.insert": {"tf": 2.449489742783178}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 2}, "sqlglot.expressions.or_": {"tf": 2}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 2}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 2.23606797749979}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.column": {"tf": 2.23606797749979}, "sqlglot.expressions.cast": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 2.23606797749979}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1.4142135623730951}, "sqlglot.expressions.rename_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, 
"sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, 
"sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, 
"sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, 
"sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.Node.__init__": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 2.6457513110645907}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 2}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.7320508075688772}, 
"sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1.4142135623730951}, "sqlglot.parser.parse_like": {"tf": 1.4142135623730951}, "sqlglot.parser.binary_range_parser": {"tf": 2}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.Join.from_joins": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.add_table": {"tf": 2}, "sqlglot.schema.Schema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.get_column_type": {"tf": 2.23606797749979}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 2.23606797749979}, "sqlglot.schema.ensure_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1.4142135623730951}, "sqlglot.transforms.explode_to_unnest": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1.4142135623730951}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1.4142135623730951}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 2}}, "df": 599}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.generator.Generator.query_modifiers": {"tf": 1}, 
"sqlglot.generator.Generator.expressions": {"tf": 1}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 2}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 2}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 2}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": 
{"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, 
"sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1.4142135623730951}, "sqlglot.errors.ParseError.new": {"tf": 2.449489742783178}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.as_": 
{"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1.4142135623730951}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.insert": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 2}, 
"sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 2}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.seg": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pad_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.wrap": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.no_identify": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.normalize_func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, 
"sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 2}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 
1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": 
{"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 2.23606797749979}, "sqlglot.generator.Generator.format_args": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 2.23606797749979}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, 
"sqlglot.generator.Generator.oncluster_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1.4142135623730951}, "sqlglot.helper.csv": {"tf": 1.7320508075688772}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.7320508075688772}, "sqlglot.helper.name_sequence": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.helper.case_sensitive": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.__init__": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 2.23606797749979}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 2}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.tokens.Tokenizer.peek": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 528, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}}, "df": 2}}}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": 
{"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 3}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.star_sql": {"tf": 1}}, "df": 1, "t": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 9, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.parser.parse_var_map": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Join.from_joins": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 12}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": 
{"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, 
"sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, 
"sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 
1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, 
"sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, 
"sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, 
"sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": 
{"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.tokens.Tokenizer.peek": {"tf": 1}}, "df": 560}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, 
"sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1.4142135623730951}}, "df": 26, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 14}}}}}}, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1.4142135623730951}}, "df": 2}}, "p": {"docs": {"sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 13}, "t": {"docs": {"sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.generator.Generator.setitem_sql": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.executor.execute": {"tf": 
1.7320508075688772}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 2.23606797749979}, "sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 19}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}}, "df": 9, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}}, "df": 1}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}}, "df": 3}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": 
{"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 4, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 2}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.generator.Generator.sub_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 7}}, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.env.str_position": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 3}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}}, "df": 2}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}}, "df": 1}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}}, "df": 10, "s": {"docs": {"sqlglot.expressions.expand": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 6}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.sort_sql": {"tf": 1}}, "df": 1}}}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.generator.Generator.similarto_sql": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.slice_sql": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 5, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 3}}}}}}, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.Column.rlike": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Condition.rlike": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.return_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}}, "df": 2}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 3}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": 
{"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 2}}}}}}}, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.reference_sql": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.renametable_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}}, "df": 2, "n": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.rawstring_sql": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor.table.Table.__init__": {"tf": 1}}, "df": 1}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.rollback_sql": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}}, "df": 6}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 4}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, 
"sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1.4142135623730951}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.insert": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1.7320508075688772}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, 
"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 138, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}}, "df": 3}}}}}}, "t": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}}, "df": 2}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 5}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 
0, "e": {"docs": {"sqlglot.generator.Generator.uncache_sql": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}}, "df": 6}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Select.join": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.generator.Generator.use_sql": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "d": {"docs": {"sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 2}, "sqlglot.parse_one": {"tf": 2}, "sqlglot.transpile": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 2.6457513110645907}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 2.23606797749979}, "sqlglot.executor.execute": {"tf": 2}, "sqlglot.expressions.Expression.sql": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.and_": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.as_": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.union": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.where": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.returning": {"tf": 2.23606797749979}, "sqlglot.expressions.Insert.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Join.on": {"tf": 2.23606797749979}, "sqlglot.expressions.Join.using": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.23606797749979}, 
"sqlglot.expressions.Union.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.select": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.group_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.order_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.sort_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.offset": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.select": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.lateral": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.having": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.window": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.qualify": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.ctas": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.hint": {"tf": 2.23606797749979}, "sqlglot.expressions.DataType.build": {"tf": 2.23606797749979}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 2.23606797749979}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.insert": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 2.23606797749979}, "sqlglot.expressions.and_": {"tf": 2.23606797749979}, "sqlglot.expressions.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.not_": {"tf": 2.23606797749979}, "sqlglot.expressions.to_table": {"tf": 2.23606797749979}, "sqlglot.expressions.alias_": {"tf": 2.23606797749979}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.helper.case_sensitive": {"tf": 2.23606797749979}, "sqlglot.helper.should_identify": {"tf": 2.23606797749979}, "sqlglot.lineage.lineage": {"tf": 2.23606797749979}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 2.23606797749979}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.add_table": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.column_names": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.get_column_type": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.__init__": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.column_names": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 2.23606797749979}}, "df": 75, "s": {"docs": {"sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.helper.case_sensitive": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 
1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 72}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1.4142135623730951}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": 
{"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 52, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.dictrange_sql": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 9}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.distribute_sql": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 3}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 2.23606797749979}}, "df": 2}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.directory_sql": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {"sqlglot.generator.Generator.div_sql": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 2}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, 
"sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 89, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 3}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 16, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}}, "df": 1}}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "e": 
{"docs": {"sqlglot.optimizer.simplify.date_literal": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}, "f": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 4}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}}, "df": 2}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}}, "df": 5}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.planner.Step.add_dependency": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}}, "df": 3}, "w": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}}, "df": 1}}}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.build": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": 
{"tf": 1}}, "df": 2}}}}}, "b": {"docs": {"sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 5}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.drop_sql": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.droppartition_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.dpipe_sql": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 4}}}, "t": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1.7320508075688772}, "sqlglot.helper.first": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 5, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 
1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 2.23606797749979}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 96, "s": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 13}, 
"a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, 
"sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 86}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}}, "df": 2}, "e": {"docs": {"sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 4}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}}, "df": 3}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 26, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}}, "df": 3}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.tablealias_sql": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 4}}}}, "g": {"docs": {"sqlglot.generator.Generator.tag_sql": {"tf": 1}}, "df": 1}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, 
"sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 15}}}}, "h": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 6}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Case.when": {"tf": 1}}, "df": 1}}}, "o": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}}, "df": 6, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 11, "s": {"docs": {"sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 11}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 2}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.tochar_sql": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "e": {"docs": 
{}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {"sqlglot.generator.Generator.national_sql": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, 
"sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 2}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1.7320508075688772}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": 
{"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 2}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 153, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, 
"sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 104}}}}}}, "t": {"docs": {"sqlglot.expressions.not_": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}}, "df": 2, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}}, "df": 3}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1}}, "df": 8}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 
1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 28}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.national_sql": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 4}, "q": {"docs": {"sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}}, "df": 2}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 2, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "g": {"docs": {"sqlglot.generator.Generator.neg_sql": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}}, "df": 2}}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 7, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, 
"sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 64}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 
1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1.7320508075688772}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, 
"df": 81}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2, "r": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 4}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.openjson_sql": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}}, "df": 17}}}}, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 5, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.generator.Generator.onconflict_sql": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.expressions.Select.distinct": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 3}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.overlaps_sql": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 4}}}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {"sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 4}}, "r": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}}, "df": 2, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.order_sql": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.ordered_sql": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 3}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1.4142135623730951}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1.7320508075688772}, "sqlglot.serde.load": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 77}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1.4142135623730951}}, "df": 3}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": 
{"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 5, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 1}}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}}, "df": 3, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.likeany_sql": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}}, "df": 4}}}}}}, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 5}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}}, "df": 2}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 6}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 3.7416573867739413}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 25, "e": {"docs": {"sqlglot.generator.Generator.lte_sql": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 3}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.expressions.Condition.between": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}}, "df": 3}}}}}}, "k": {"docs": {"sqlglot.generator.Generator.lock_sql": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.generator.Generator.loaddata_sql": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": 
{"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.23606797749979}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}}, "df": 19, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 3, "r": {"docs": {"sqlglot.parser.binary_range_parser": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 2}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, 
"sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Remove.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Move.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Update.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 2}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.expressions.Condition.like": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, 
"sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1.4142135623730951}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.7320508075688772}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.wrap": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": 
{"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, 
"sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, 
"sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, 
"sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.__init__": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, 
"sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.unnest_to_explode": {"tf": 1.7320508075688772}, "sqlglot.transforms.explode_to_unnest": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1.7320508075688772}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1.7320508075688772}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}}, "df": 481, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1}, 
"sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 2}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.isin": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Condition.is_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.like": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.ilike": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Condition.rlike": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.using": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Subqueryable.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.window": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.qualify": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.Dot.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case.when": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case.else_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.is_type": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.insert": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 2.23606797749979}, "sqlglot.expressions.cast": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 2.23606797749979}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1.4142135623730951}, "sqlglot.expressions.rename_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 1}, 
"sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.clone_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, 
"sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1}, "sqlglot.generator.Generator.returning_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.setitem_sql": {"tf": 1}, "sqlglot.generator.Generator.set_sql": {"tf": 1}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": 
{"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, 
"sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, 
"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1.4142135623730951}, "sqlglot.parser.parse_like": {"tf": 1.4142135623730951}, "sqlglot.parser.binary_range_parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1.4142135623730951}, "sqlglot.transforms.explode_to_unnest": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1.4142135623730951}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1.4142135623730951}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}}, "df": 482}}}}}}}, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.generator.Generator.exists_sql": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}}, "df": 5}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.table.ensure_tables": {"tf": 1}}, "df": 4}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.extract_sql": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}}, "df": 7}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 3}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 7}, "v": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}}, "df": 2}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor.env.filter_nulls": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {"sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}}, "df": 2}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}, "i": {"docs": {"sqlglot.tokens.Tokenizer.peek": {"tf": 1}}, "df": 1, "n": {"docs": {"sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}}, "df": 3, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.4142135623730951}, 
"sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1.4142135623730951}, "sqlglot.errors.ParseError.new": {"tf": 1.4142135623730951}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 2}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 2}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Tokenizer.peek": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 41, "o": {"docs": {"sqlglot.parse_one": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}}, "df": 5}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}}, "df": 5}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}}, "df": 5}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.introducer_sql": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.generator.Generator.intdiv_sql": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": 
{"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}}, "df": 5}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}}, "df": 3}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}}, "df": 3}}}}, "d": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}}, "df": 19, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 2}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}}, "df": 9, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": 
{"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 2}}}}, "y": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}}, "df": 4}}}}}}}, "t": {"docs": {"sqlglot.helper.first": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}}, "df": 5}}}}}}}, "f": {"docs": {"sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}}, "df": 3}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.expressions.Condition.ilike": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.ilikeany_sql": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {"sqlglot.expressions.Condition.is_": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}}, "df": 2, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, 
"sqlglot.expressions.column": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 4, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse_one": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 6}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.collate_sql": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}}, "df": 18}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1.4142135623730951}, 
"sqlglot.dataframe.sql.Column.when": {"tf": 2}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 2}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 2}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.substr": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 48, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 8}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.columnposition_sql": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.hash_join": 
{"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 10}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 11}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.constraint_sql": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.connector_sql": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, 
"sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}}, "df": 52}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.comment_sql": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.expressions.Expression.add_comments": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 5}}}}, "a": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.command_sql": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.commit_sql": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.eval": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor.context.Context.eval_tuple": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, 
"sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 30}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 5, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.clone_sql": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.cluster_sql": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, 
"sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.parser.binary_range_parser": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 19}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}}, "df": 5}, "e": {"docs": {"sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}}, "df": 3}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 5}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 1}}, "df": 3}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}}, "df": 6}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.characterset_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, 
"c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.check_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.properties": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}}, "df": 12, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1}}, "df": 9}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}}, "df": 2, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.withingroup_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}}, "df": 3}}, "n": 
{"docs": {"sqlglot.generator.Generator.when_sql": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 2, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.qualify": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, 
"sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Case.when": {"tf": 1}, "sqlglot.expressions.Case.else_": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 2.449489742783178}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 101, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}}, "df": 3}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bracket_sql": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {"sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.bytestring_sql": {"tf": 1}}, "df": 
1}}}}}}}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition.between": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.bitstring_sql": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "x": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.binary": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": 
{"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}}, "df": 19}}}}}}}, "p": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1.7320508075688772}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 8, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}}, "df": 2}}}}}}}}}}}, "x": {"docs": {"sqlglot.dialects.dialect.max_or_greatest": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.matchagainst_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 5}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}}, "df": 2, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 2}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.min_or_least": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 3}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.mul_sql": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, 
"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 3.7416573867739413}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 26, "e": {"docs": {"sqlglot.generator.Generator.gte_sql": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.if_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 34}}, "e": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.generator.Generator.glob_sql": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 7}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 23}}, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 2, "c": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}}, "df": 7, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 3.7416573867739413}}, "df": 1, "s": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}}, "df": 2}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": 
{"sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1.4142135623730951}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.filter_sql": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}, "l": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 18, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 4}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {"sqlglot.expressions.var": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 3, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": 
{"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 4}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.parameter_sql": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.parser": {"tf": 1.4142135623730951}, "sqlglot.parser.binary_range_parser": {"tf": 1.4142135623730951}}, "df": 2}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.partition_sql": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.paren": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}}, "df": 2, "t": {"docs": {"sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 4}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 3}}, "d": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}}, "df": 15}}}, "y": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.createable_sql": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 6}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}}, "df": 8}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 3}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}}, "df": 3}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 
1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.generator.Generator.pragma_sql": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}}, "df": 2}}}}}}}}, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Join.from_joins": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 5}}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.env.str_position": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.hint_sql": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}}, "df": 2}}}, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Condition.between": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.hexstring_sql": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.having_sql": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 2}}}}}}}}, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.kwarg_sql": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}}, "df": 34}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}}, "df": 3}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}, "y": {"docs": {"sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 2}, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.is_false": {"tf": 1}, "sqlglot.optimizer.simplify.is_null": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, 
"sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}}, "df": 6, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Condition.between": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.eq": {"tf": 1}, "sqlglot.expressions.Condition.neq": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.7320508075688772}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 31}, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}}, "df": 2}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}}, "df": 2}}}, "e": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 3}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.all_sql": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, 
"l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Condition.as_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 15, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.aliases_sql": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}}, "df": 2}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.func": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.parse_like": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 20}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 
0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.qualify": {"tf": 1}}, "df": 17}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.pivot_column_names": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 3.7416573867739413}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.attimezone_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.add_sql": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "v": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.jsonobject_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 9, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.joinhint_sql": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 2}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "x": {"2": {"7": {"docs": {"sqlglot.helper.open_file": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot.expressions.Condition.as_": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}}, "df": 5}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Condition.isin": {"tf": 1}, "sqlglot.expressions.Tuple.isin": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "bases": {"root": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, 
"sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 85, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 6}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"2": {"docs": {"sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}}, "df": 4}, "docs": {"sqlglot.dialects.databricks.Databricks": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.SetOperation": {"tf": 1}}, "df": 5}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.All": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.expressions.Exists": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill": {"tf": 
1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python": {"tf": 1.4142135623730951}}, "df": 15, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}}, "df": 36}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}}, "df": 3}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SafeDPipe": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": 
{}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}}, "df": 14}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}}, "df": 18}}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}}, "df": 17}}}}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 
1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}}, "df": 19}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.trino.Trino": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1.4142135623730951}}, "df": 3}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SubqueryPredicate": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}}, "df": 18}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AlgorithmProperty": {"tf": 1}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1}, "sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.expressions.CollateProperty": {"tf": 1}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.expressions.DefinerProperty": {"tf": 1}, "sqlglot.expressions.DistKeyProperty": {"tf": 1}, "sqlglot.expressions.DistStyleProperty": {"tf": 1}, 
"sqlglot.expressions.EngineProperty": {"tf": 1}, "sqlglot.expressions.ToTableProperty": {"tf": 1}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1}, "sqlglot.expressions.ExternalProperty": {"tf": 1}, "sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.expressions.FileFormatProperty": {"tf": 1}, "sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.expressions.LanguageProperty": {"tf": 1}, "sqlglot.expressions.DictProperty": {"tf": 1}, "sqlglot.expressions.DictSubProperty": {"tf": 1}, "sqlglot.expressions.DictRange": {"tf": 1}, "sqlglot.expressions.OnCluster": {"tf": 1}, "sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.expressions.LocationProperty": {"tf": 1}, "sqlglot.expressions.LockingProperty": {"tf": 1}, "sqlglot.expressions.LogProperty": {"tf": 1}, "sqlglot.expressions.MaterializedProperty": {"tf": 1}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1}, "sqlglot.expressions.OnCommitProperty": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}, "sqlglot.expressions.RowFormatProperty": {"tf": 1}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1}, "sqlglot.expressions.SerdeProperties": {"tf": 1}, "sqlglot.expressions.SetProperty": {"tf": 1}, "sqlglot.expressions.SettingsProperty": {"tf": 1}, "sqlglot.expressions.SortKeyProperty": {"tf": 1}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1}, "sqlglot.expressions.StabilityProperty": {"tf": 1}, "sqlglot.expressions.TemporaryProperty": {"tf": 1}, "sqlglot.expressions.TransientProperty": {"tf": 1}, "sqlglot.expressions.VolatileProperty": {"tf": 1}, "sqlglot.expressions.WithDataProperty": {"tf": 1}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1}}, "df": 49}}}}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}}, "df": 21}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "~": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 1}}}}}}}}}}, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Add": {"tf": 1}, "sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.expressions.Div": {"tf": 1}, "sqlglot.expressions.Overlaps": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.expressions.ILikeAny": {"tf": 1}, "sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LikeAny": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.expressions.ArrayOverlaps": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayContained": {"tf": 1}, "sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.expressions.JSONBContains": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}}, "df": 41}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1.4142135623730951}}, "df": 3}}}, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.DerivedTable": {"tf": 1}, "sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Clone": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.expressions.Pragma": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.expressions.ColumnPosition": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.expressions.SetTag": {"tf": 1}, "sqlglot.expressions.Comment": {"tf": 1}, "sqlglot.expressions.MergeTreeTTLAction": {"tf": 1}, "sqlglot.expressions.MergeTreeTTL": {"tf": 1}, "sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.expressions.Check": {"tf": 1}, "sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.OnConflict": {"tf": 1}, "sqlglot.expressions.Returning": {"tf": 1}, "sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.expressions.National": {"tf": 1}, "sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.expressions.Final": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.expressions.Property": {"tf": 1}, "sqlglot.expressions.InputOutputFormat": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Return": {"tf": 1}, "sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, 
"sqlglot.expressions.DataTypeSize": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.expressions.Command": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.expressions.JSONKeyValue": {"tf": 1}, "sqlglot.expressions.OpenJSONColumnDef": {"tf": 1}, "sqlglot.expressions.Use": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}}, "df": 103}}}}}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.spark2.Spark2": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1.4142135623730951}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 4}}}}}}, "m": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.starrocks.StarRocks": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 4}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "c": {"docs": {"sqlglot.schema.Schema": {"tf": 1.4142135623730951}}, "df": 1}}, "g": {"docs": {}, 
"df": 0, "g": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.ParameterizedAgg": {"tf": 1}, "sqlglot.expressions.Hll": {"tf": 1}, "sqlglot.expressions.ApproxDistinct": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}, "sqlglot.expressions.AnyValue": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.expressions.CountIf": {"tf": 1}, "sqlglot.expressions.LogicalOr": {"tf": 1}, "sqlglot.expressions.LogicalAnd": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}, "sqlglot.expressions.PercentileCont": {"tf": 1}, "sqlglot.expressions.PercentileDisc": {"tf": 1}, "sqlglot.expressions.Quantile": {"tf": 1}, "sqlglot.expressions.SetAgg": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}, "sqlglot.expressions.Stddev": {"tf": 1}, "sqlglot.expressions.StddevPop": {"tf": 1}, "sqlglot.expressions.StddevSamp": {"tf": 1}, "sqlglot.expressions.Variance": {"tf": 1}, "sqlglot.expressions.VariancePop": {"tf": 1}}, "df": 23}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.expressions.RawString": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}, "sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 13}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.SafeConcat": {"tf": 1}, "sqlglot.expressions.ConcatWs": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1}, "sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.expressions.CompressColumnConstraint": {"tf": 1}, "sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 
1}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.expressions.TitleColumnConstraint": {"tf": 1}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1}, "sqlglot.expressions.PathColumnConstraint": {"tf": 1}}, "df": 19}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TryCast": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}}, "df": 3}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.Neg": {"tf": 1}}, "df": 4}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}}, "df": 3}}}}}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.AggFunc": {"tf": 1}, "sqlglot.expressions.Abs": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.expressions.ToChar": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}, "sqlglot.expressions.ArrayAll": {"tf": 1}, "sqlglot.expressions.ArrayAny": {"tf": 1}, "sqlglot.expressions.ArrayConcat": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayFilter": {"tf": 1}, "sqlglot.expressions.ArrayJoin": {"tf": 1}, "sqlglot.expressions.ArraySize": {"tf": 1}, "sqlglot.expressions.ArraySort": {"tf": 1}, "sqlglot.expressions.ArraySum": {"tf": 1}, "sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.CastToStrType": {"tf": 1}, "sqlglot.expressions.Ceil": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.expressions.CurrentDatetime": {"tf": 1}, "sqlglot.expressions.CurrentTime": {"tf": 1}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1}, "sqlglot.expressions.CurrentUser": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DateTrunc": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.DayOfWeek": {"tf": 1}, "sqlglot.expressions.DayOfMonth": {"tf": 1}, "sqlglot.expressions.DayOfYear": {"tf": 1}, "sqlglot.expressions.WeekOfYear": {"tf": 1}, "sqlglot.expressions.LastDateOfMonth": 
{"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.DateFromParts": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}, "sqlglot.expressions.DateToDateStr": {"tf": 1}, "sqlglot.expressions.DateToDi": {"tf": 1}, "sqlglot.expressions.Date": {"tf": 1}, "sqlglot.expressions.Day": {"tf": 1}, "sqlglot.expressions.Decode": {"tf": 1}, "sqlglot.expressions.DiToDate": {"tf": 1}, "sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.expressions.Exp": {"tf": 1}, "sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.expressions.Floor": {"tf": 1}, "sqlglot.expressions.FromBase64": {"tf": 1}, "sqlglot.expressions.ToBase64": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}, "sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.expressions.Initcap": {"tf": 1}, "sqlglot.expressions.JSONObject": {"tf": 1}, "sqlglot.expressions.OpenJSON": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.JSONFormat": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}, "sqlglot.expressions.Left": {"tf": 1}, "sqlglot.expressions.Right": {"tf": 1}, "sqlglot.expressions.Length": {"tf": 1}, "sqlglot.expressions.Levenshtein": {"tf": 1}, "sqlglot.expressions.Ln": {"tf": 1}, "sqlglot.expressions.Log": {"tf": 1}, "sqlglot.expressions.Log2": {"tf": 1}, "sqlglot.expressions.Log10": {"tf": 1}, "sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.expressions.StarMap": {"tf": 1}, "sqlglot.expressions.VarMap": {"tf": 1}, "sqlglot.expressions.MatchAgainst": {"tf": 1}, "sqlglot.expressions.MD5": {"tf": 1}, "sqlglot.expressions.Month": {"tf": 1}, "sqlglot.expressions.Nvl2": {"tf": 1}, "sqlglot.expressions.Posexplode": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}, "sqlglot.expressions.RangeN": {"tf": 1}, "sqlglot.expressions.ReadCSV": {"tf": 1}, "sqlglot.expressions.Reduce": {"tf": 1}, "sqlglot.expressions.RegexpExtract": {"tf": 1}, "sqlglot.expressions.RegexpLike": {"tf": 1}, "sqlglot.expressions.RegexpILike": {"tf": 1}, "sqlglot.expressions.RegexpSplit": {"tf": 1}, "sqlglot.expressions.Repeat": {"tf": 1}, "sqlglot.expressions.Round": {"tf": 1}, "sqlglot.expressions.RowNumber": {"tf": 1}, "sqlglot.expressions.SafeDivide": {"tf": 1}, "sqlglot.expressions.SHA": {"tf": 1}, "sqlglot.expressions.SHA2": {"tf": 1}, "sqlglot.expressions.SortArray": {"tf": 1}, "sqlglot.expressions.Split": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}, "sqlglot.expressions.StandardHash": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}, "sqlglot.expressions.StrToDate": {"tf": 1}, "sqlglot.expressions.StrToTime": {"tf": 1}, "sqlglot.expressions.StrToUnix": {"tf": 1}, "sqlglot.expressions.NumberToStr": {"tf": 1}, "sqlglot.expressions.FromBase": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.expressions.StructExtract": {"tf": 1}, "sqlglot.expressions.Sqrt": {"tf": 1}, "sqlglot.expressions.TimeToStr": {"tf": 1}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1}, "sqlglot.expressions.TimeToUnix": {"tf": 1}, "sqlglot.expressions.TimeStrToDate": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}, 
"sqlglot.expressions.TimeStrToUnix": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1}, "sqlglot.expressions.Unhex": {"tf": 1}, "sqlglot.expressions.UnixToStr": {"tf": 1}, "sqlglot.expressions.UnixToTime": {"tf": 1}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1}, "sqlglot.expressions.Upper": {"tf": 1}, "sqlglot.expressions.Week": {"tf": 1}, "sqlglot.expressions.XMLTable": {"tf": 1}, "sqlglot.expressions.Year": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.expressions.NextValueFor": {"tf": 1}}, "df": 137}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONBExtract": {"tf": 1}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1}}, "df": 3}}}}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ApproxQuantile": {"tf": 1}}, "df": 1}}}}}}}}}}, "doc": {"root": {"0": {"0": {"0": {"9": {"9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"0": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"4": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"2": {"5": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "5": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"0": {"5": {"4": {"5": {"5": {"2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"2": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"8": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"0": {"8": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "9": {"9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}, "2": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "3": {"2": {"8": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"1": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"4": {"1": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"5": {"8": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"6": {"7": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "8": 
{"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "5": {"4": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 6.928203230275509}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 4.47213595499958}, "sqlglot.executor": {"tf": 2}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Paren.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 2}, "sqlglot.trie.in_trie": {"tf": 2.23606797749979}}, "df": 23, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "1": {"0": {"0": {"0": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20}, "4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}}, "df": 4, "^": {"1": {"2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "1": {"4": {"5": {"docs": {}, "df": 0, "/": {"2": {"6": {"4": {"2": {"9": 
{"3": {"7": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "2": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "3": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "6": {"3": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "3": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "4": {"1": {"3": {"4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"1": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"1": {"8": {"0": {"8": {"8": {"0": {"2": {"8": {"2": {"9": {"5": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "8": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "9": {"8": {"6": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "9": {"6": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 6}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.diff": {"tf": 4}, "sqlglot.executor": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Condition.or_": {"tf": 2}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, "sqlglot.expressions.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": 
{"tf": 1}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.dict_depth": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 45, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2}}}, "2": {"0": {"0": {"7": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "1": {"4": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"1": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "4": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "5": {"1": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "5": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"4": {"2": {"9": {"8": {"2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 2}, "9": {"4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 3.3166247903554}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 4.358898943540674}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 48}, "3": {"1": {"3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"4": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "4": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}}, "df": 1}, "7": {"7": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"docs": {"sqlglot": {"tf": 6.855654600401044}, "sqlglot.dataframe": {"tf": 7.0710678118654755}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": 
{"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Star.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Dot.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Paren.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Alias.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Cast.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 4}, "sqlglot.expressions.subquery": {"tf": 2.8284271247461903}, "sqlglot.expressions.cast": {"tf": 2.449489742783178}, "sqlglot.expressions.values": {"tf": 2}, "sqlglot.expressions.var": {"tf": 3.1622776601683795}, "sqlglot.expressions.column_table_names": {"tf": 2}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 2}, "sqlglot.expressions.func": {"tf": 2}, "sqlglot.helper.split_num_words": {"tf": 3.4641016151377544}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.8284271247461903}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_set": {"tf": 4}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 3.7416573867739413}, "sqlglot.trie.in_trie": {"tf": 2.8284271247461903}}, "df": 93}, "docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 53}, "4": {"0": {"0": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"4": {"3": {"4": {"1": {"6": {"6": {"2": {"4": {"docs": 
{"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"9": {"8": {"3": {"1": {"3": {"6": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"6": {"2": {"4": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"2": {"9": {"6": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}, "8": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "9": {"3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 5}, "5": {"0": {"4": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.func": {"tf": 2}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}}, "df": 8}, "6": {"0": {"6": {"2": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.23606797749979}}, "df": 2, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "7": {"0": {"4": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"docs": {}, "df": 0, "\u2013": {"7": {"4": {"3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "6": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "9": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 2}, "8": {"0": {"5": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}, "9": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}, "9": {"0": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "7": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "8": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 62.20932405998316}, "sqlglot.pretty": {"tf": 1.7320508075688772}, "sqlglot.schema": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 5.916079783099616}, "sqlglot.parse_one": {"tf": 6.324555320336759}, "sqlglot.transpile": {"tf": 7.211102550927978}, "sqlglot.dataframe": {"tf": 48.86716689148246}, "sqlglot.dataframe.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 
1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 2.6457513110645907}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 2}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 
1.7320508075688772}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.like": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.over": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 21.817424229271428}, "sqlglot.dialects.bigquery": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.with_properties": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.safeconcat_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.after_limit_modifiers": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.parameterizedagg_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.placeholder_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.oncluster_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.createable_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.databricks.Databricks.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SPARK2": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.tokenize": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.if_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_comment_column_constraint_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 5.744562646538029}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.parse_date_delta_with_interval": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.date_trunc_to_time": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.timestamptrunc_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.left_to_substring_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.right_to_substring_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.min_or_least": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.max_or_greatest": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.count_if_to_sum": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.str_to_time_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.ts_or_ds_to_date_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.concat_to_dpipe_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.pivot_column_names": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 4.58257569495584}, 
"sqlglot.dialects.drill.Drill.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.duckdb.DuckDB.Generator.interval_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator.tablesample_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.hive.Hive.Generator.arrayagg_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator.after_having_modifiers": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator.column_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator.xmltable_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.presto": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.presto.Presto.Generator.interval_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator.generateseries_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator.offset_limit_modifiers": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 
1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 3.3166247903554}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.snowflake": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.settag_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.spark.Spark.Generator.datediff_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.spark2.Spark2.Generator.cast_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator.columndef_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.sqlite.SQLite.Generator.cast_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator.datediff_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator.groupconcat_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator.least_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.tableau": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.tableau.Tableau.Generator.if_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator.count_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.teradata": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata": {"tf": 
1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator.mod_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator.rangen_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator.createable_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 4.58257569495584}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 8.06225774829855}, "sqlglot.dialects.tsql.TSQL.Generator.offset_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 50.556898639058154}, "sqlglot.diff.Insert": {"tf": 1.4142135623730951}, "sqlglot.diff.Insert.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Remove": {"tf": 1.4142135623730951}, "sqlglot.diff.Remove.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Move": {"tf": 1.4142135623730951}, "sqlglot.diff.Move.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Update": {"tf": 1.4142135623730951}, "sqlglot.diff.Update.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Keep": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 11.313708498984761}, "sqlglot.diff.ChangeDistiller": {"tf": 2.6457513110645907}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1.7320508075688772}, "sqlglot.errors": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.7320508075688772}, "sqlglot.errors.SqlglotError": {"tf": 1.7320508075688772}, "sqlglot.errors.UnsupportedError": {"tf": 1.7320508075688772}, "sqlglot.errors.ParseError": {"tf": 1.7320508075688772}, "sqlglot.errors.ParseError.__init__": {"tf": 1.7320508075688772}, "sqlglot.errors.ParseError.new": {"tf": 1.7320508075688772}, "sqlglot.errors.TokenError": {"tf": 1.7320508075688772}, "sqlglot.errors.OptimizeError": {"tf": 1.7320508075688772}, "sqlglot.errors.SchemaError": {"tf": 1.7320508075688772}, "sqlglot.errors.ExecuteError": {"tf": 1.7320508075688772}, "sqlglot.errors.concat_messages": {"tf": 1.7320508075688772}, "sqlglot.errors.merge_errors": {"tf": 1.7320508075688772}, "sqlglot.executor": 
{"tf": 35.11409973215888}, "sqlglot.executor.execute": {"tf": 7}, "sqlglot.executor.context": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context": {"tf": 3}, "sqlglot.executor.context.Context.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.eval": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.add_columns": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.filter": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.sort": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.set_row": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.set_index": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.set_range": {"tf": 1.7320508075688772}, "sqlglot.executor.env": {"tf": 1.7320508075688772}, "sqlglot.executor.env.reverse_key": {"tf": 1.7320508075688772}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.env.filter_nulls": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 4.58257569495584}, "sqlglot.executor.env.str_position": {"tf": 1.7320508075688772}, "sqlglot.executor.env.substring": {"tf": 1.7320508075688772}, "sqlglot.executor.env.cast": {"tf": 1.7320508075688772}, "sqlglot.executor.env.ordered": {"tf": 1.7320508075688772}, "sqlglot.executor.env.interval": {"tf": 1.7320508075688772}, "sqlglot.executor.python": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 8.06225774829855}, "sqlglot.executor.table": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.add_columns": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.append": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.pop": {"tf": 1.7320508075688772}, "sqlglot.executor.table.TableIter": {"tf": 
1.7320508075688772}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RangeReader": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RowReader": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 4}, "sqlglot.executor.table.ensure_tables": {"tf": 1.7320508075688772}, "sqlglot.expressions": {"tf": 4}, "sqlglot.expressions.Expression": {"tf": 10.954451150103322}, "sqlglot.expressions.Expression.__init__": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.this": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.expressions": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.text": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.is_string": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.is_number": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.is_int": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.is_star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.alias": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Expression.copy": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.add_comments": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 4.358898943540674}, "sqlglot.expressions.Expression.set": {"tf": 4.898979485566356}, "sqlglot.expressions.Expression.depth": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 5.291502622129181}, "sqlglot.expressions.Expression.find_all": {"tf": 5.291502622129181}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 4.795831523312719}, "sqlglot.expressions.Expression.parent_select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.same_parent": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.root": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.walk": {"tf": 5.5677643628300215}, "sqlglot.expressions.Expression.dfs": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.bfs": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.unnest": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.unalias": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.flatten": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.sql": {"tf": 5.477225575051661}, "sqlglot.expressions.Expression.transform": {"tf": 5.477225575051661}, "sqlglot.expressions.Expression.replace": {"tf": 5.477225575051661}, "sqlglot.expressions.Expression.pop": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.assert_is": {"tf": 9.643650760992955}, "sqlglot.expressions.Expression.error_messages": {"tf": 4.795831523312719}, "sqlglot.expressions.Expression.dump": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.load": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.and_": {"tf": 9.797958971132712}, "sqlglot.expressions.Condition.or_": {"tf": 9.797958971132712}, "sqlglot.expressions.Condition.not_": {"tf": 8.54400374531753}, "sqlglot.expressions.Condition.as_": {"tf": 
1.7320508075688772}, "sqlglot.expressions.Condition.isin": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.between": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.is_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.like": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.ilike": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.eq": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.neq": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.rlike": {"tf": 1.7320508075688772}, "sqlglot.expressions.Predicate": {"tf": 1.7320508075688772}, "sqlglot.expressions.DerivedTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 10.488088481701515}, "sqlglot.expressions.Unionable.intersect": {"tf": 10.488088481701515}, "sqlglot.expressions.Unionable.except_": {"tf": 10.535653752852738}, "sqlglot.expressions.UDTF": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cache": {"tf": 1.7320508075688772}, "sqlglot.expressions.Uncache": {"tf": 1.7320508075688772}, "sqlglot.expressions.Create": {"tf": 1.7320508075688772}, "sqlglot.expressions.Clone": {"tf": 1.7320508075688772}, "sqlglot.expressions.Describe": {"tf": 1.7320508075688772}, "sqlglot.expressions.Pragma": {"tf": 1.7320508075688772}, "sqlglot.expressions.Set": {"tf": 1.7320508075688772}, "sqlglot.expressions.SetItem": {"tf": 1.7320508075688772}, "sqlglot.expressions.Show": {"tf": 1.7320508075688772}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1.7320508075688772}, "sqlglot.expressions.CharacterSet": {"tf": 1.7320508075688772}, "sqlglot.expressions.With": {"tf": 1.7320508075688772}, "sqlglot.expressions.WithinGroup": {"tf": 1.7320508075688772}, "sqlglot.expressions.CTE": {"tf": 1.7320508075688772}, "sqlglot.expressions.TableAlias": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitString": {"tf": 1.7320508075688772}, "sqlglot.expressions.HexString": {"tf": 1.7320508075688772}, "sqlglot.expressions.ByteString": {"tf": 1.7320508075688772}, "sqlglot.expressions.RawString": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Column.parts": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.to_dot": {"tf": 1.7320508075688772}, "sqlglot.expressions.ColumnPosition": {"tf": 1.7320508075688772}, "sqlglot.expressions.ColumnDef": {"tf": 1.7320508075688772}, "sqlglot.expressions.AlterColumn": {"tf": 1.7320508075688772}, "sqlglot.expressions.RenameTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.SetTag": {"tf": 1.7320508075688772}, "sqlglot.expressions.Comment": {"tf": 1.7320508075688772}, "sqlglot.expressions.MergeTreeTTLAction": {"tf": 1.7320508075688772}, "sqlglot.expressions.MergeTreeTTL": {"tf": 1.7320508075688772}, "sqlglot.expressions.ColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1.7320508075688772}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CaseSpecificColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CharacterSetColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CompressColumnConstraint": {"tf": 
1.7320508075688772}, "sqlglot.expressions.DateFormatColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.InlineLengthColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.OnUpdateColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.TitleColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.UppercaseColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.PathColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Constraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.delete": {"tf": 9.055385138137417}, "sqlglot.expressions.Delete.where": {"tf": 10.198039027185569}, "sqlglot.expressions.Delete.returning": {"tf": 10.488088481701515}, "sqlglot.expressions.Drop": {"tf": 1.7320508075688772}, "sqlglot.expressions.Filter": {"tf": 1.7320508075688772}, "sqlglot.expressions.Check": {"tf": 1.7320508075688772}, "sqlglot.expressions.Directory": {"tf": 1.7320508075688772}, "sqlglot.expressions.ForeignKey": {"tf": 1.7320508075688772}, "sqlglot.expressions.PrimaryKey": {"tf": 1.7320508075688772}, "sqlglot.expressions.Into": {"tf": 1.7320508075688772}, "sqlglot.expressions.From": {"tf": 1.7320508075688772}, "sqlglot.expressions.Having": {"tf": 1.7320508075688772}, "sqlglot.expressions.Hint": {"tf": 1.7320508075688772}, "sqlglot.expressions.JoinHint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Index": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 11.874342087037917}, "sqlglot.expressions.OnConflict": {"tf": 1.7320508075688772}, "sqlglot.expressions.Returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.Introducer": {"tf": 1.7320508075688772}, "sqlglot.expressions.National": {"tf": 1.7320508075688772}, "sqlglot.expressions.LoadData": {"tf": 1.7320508075688772}, "sqlglot.expressions.Partition": {"tf": 1.7320508075688772}, "sqlglot.expressions.Fetch": {"tf": 1.7320508075688772}, "sqlglot.expressions.Group": {"tf": 1.7320508075688772}, "sqlglot.expressions.Lambda": {"tf": 1.7320508075688772}, "sqlglot.expressions.Limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.number": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.string": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 11.135528725660043}, "sqlglot.expressions.Join.using": {"tf": 11.445523142259598}, "sqlglot.expressions.Lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.MatchRecognize": {"tf": 1.7320508075688772}, "sqlglot.expressions.Final": {"tf": 1.7320508075688772}, "sqlglot.expressions.Offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Order": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Cluster": {"tf": 1.7320508075688772}, "sqlglot.expressions.Distribute": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sort": {"tf": 1.7320508075688772}, "sqlglot.expressions.Ordered": {"tf": 1.7320508075688772}, "sqlglot.expressions.Property": {"tf": 1.7320508075688772}, "sqlglot.expressions.AlgorithmProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ChecksumProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.CollateProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DefinerProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DistKeyProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DistStyleProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.EngineProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ToTableProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ExternalProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.FallbackProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.FileFormatProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.FreespaceProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.InputOutputFormat": {"tf": 1.7320508075688772}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.JournalProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LanguageProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DictProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DictSubProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DictRange": {"tf": 1.7320508075688772}, "sqlglot.expressions.OnCluster": {"tf": 1.7320508075688772}, "sqlglot.expressions.LikeProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LocationProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LockingProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LogProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.MaterializedProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.NoPrimaryIndexProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.OnCommitProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ReturnsProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowFormatProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SerdeProperties": {"tf": 1.7320508075688772}, "sqlglot.expressions.SetProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SettingsProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SortKeyProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.StabilityProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.TemporaryProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.TransientProperty": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.VolatileProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.WithDataProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_NAME": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_SCHEMA": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_WITH": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_ALIAS": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_EXPRESSION": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.from_dict": {"tf": 1.7320508075688772}, "sqlglot.expressions.Qualify": {"tf": 1.7320508075688772}, "sqlglot.expressions.Return": {"tf": 1.7320508075688772}, "sqlglot.expressions.Reference": {"tf": 1.7320508075688772}, "sqlglot.expressions.Tuple": {"tf": 1.7320508075688772}, "sqlglot.expressions.Tuple.isin": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 11.832159566199232}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 12.449899597988733}, "sqlglot.expressions.Table": {"tf": 1.7320508075688772}, "sqlglot.expressions.Table.parts": {"tf": 1.7320508075688772}, "sqlglot.expressions.SystemTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 10.816653826391969}, "sqlglot.expressions.Union.select": {"tf": 10.723805294763608}, "sqlglot.expressions.Union.is_star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Except": {"tf": 1.7320508075688772}, "sqlglot.expressions.Intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unnest": {"tf": 1.7320508075688772}, "sqlglot.expressions.Update": {"tf": 1.7320508075688772}, "sqlglot.expressions.Values": {"tf": 1.7320508075688772}, "sqlglot.expressions.Var": {"tf": 1.7320508075688772}, "sqlglot.expressions.Schema": {"tf": 1.7320508075688772}, "sqlglot.expressions.Lock": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 10.488088481701515}, "sqlglot.expressions.Select.group_by": {"tf": 11.704699910719626}, "sqlglot.expressions.Select.order_by": {"tf": 11.445523142259598}, "sqlglot.expressions.Select.sort_by": {"tf": 11.874342087037917}, "sqlglot.expressions.Select.cluster_by": {"tf": 11.874342087037917}, "sqlglot.expressions.Select.limit": {"tf": 10.954451150103322}, "sqlglot.expressions.Select.offset": {"tf": 10.954451150103322}, "sqlglot.expressions.Select.select": {"tf": 10.246950765959598}, "sqlglot.expressions.Select.lateral": {"tf": 11.180339887498949}, "sqlglot.expressions.Select.join": {"tf": 18.894443627691185}, "sqlglot.expressions.Select.where": {"tf": 11.180339887498949}, "sqlglot.expressions.Select.having": {"tf": 12.041594578792296}, "sqlglot.expressions.Select.window": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.qualify": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Select.distinct": {"tf": 10.14889156509222}, "sqlglot.expressions.Select.ctas": {"tf": 11.135528725660043}, "sqlglot.expressions.Select.lock": {"tf": 14.696938456699069}, "sqlglot.expressions.Select.hint": {"tf": 11.180339887498949}, "sqlglot.expressions.Select.is_star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.unnest": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.is_star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.TableSample": {"tf": 1.7320508075688772}, "sqlglot.expressions.Tag": {"tf": 2.23606797749979}, "sqlglot.expressions.Pivot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Window": {"tf": 1.7320508075688772}, "sqlglot.expressions.WindowSpec": {"tf": 1.7320508075688772}, "sqlglot.expressions.Where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Parameter": {"tf": 1.7320508075688772}, "sqlglot.expressions.SessionParameter": {"tf": 1.7320508075688772}, "sqlglot.expressions.Placeholder": {"tf": 1.7320508075688772}, "sqlglot.expressions.Null": {"tf": 1.7320508075688772}, "sqlglot.expressions.Boolean": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataTypeSize": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BIGDECIMAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BIT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATETIME64": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.ENUM": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT4RANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT4MULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT8RANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT8MULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NUMRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NUMMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TSRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TSMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TSTZRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TSTZMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATERANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATEMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 
1.7320508075688772}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INET": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT128": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT256": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SET": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UBIGINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.USMALLINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UTINYINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UINT128": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UINT256": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.is_type": {"tf": 1.7320508075688772}, "sqlglot.expressions.PseudoType": {"tf": 1.7320508075688772}, "sqlglot.expressions.SubqueryPredicate": {"tf": 1.7320508075688772}, "sqlglot.expressions.All": {"tf": 1.7320508075688772}, "sqlglot.expressions.Any": {"tf": 1.7320508075688772}, "sqlglot.expressions.Exists": {"tf": 1.7320508075688772}, "sqlglot.expressions.Command": {"tf": 1.7320508075688772}, "sqlglot.expressions.Transaction": {"tf": 1.7320508075688772}, "sqlglot.expressions.Commit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Rollback": {"tf": 1.7320508075688772}, "sqlglot.expressions.AlterTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.AddConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.DropPartition": {"tf": 1.7320508075688772}, "sqlglot.expressions.Binary": {"tf": 1.7320508075688772}, "sqlglot.expressions.Add": {"tf": 1.7320508075688772}, "sqlglot.expressions.Connector": {"tf": 1.7320508075688772}, "sqlglot.expressions.And": {"tf": 1.7320508075688772}, "sqlglot.expressions.Or": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseAnd": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseOr": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseXor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Div": {"tf": 1.7320508075688772}, "sqlglot.expressions.Overlaps": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Dot.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.DPipe": {"tf": 1.7320508075688772}, "sqlglot.expressions.SafeDPipe": {"tf": 1.7320508075688772}, "sqlglot.expressions.EQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.NullSafeEQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.Distance": {"tf": 1.7320508075688772}, "sqlglot.expressions.Escape": {"tf": 1.7320508075688772}, "sqlglot.expressions.Glob": {"tf": 1.7320508075688772}, "sqlglot.expressions.GT": {"tf": 1.7320508075688772}, "sqlglot.expressions.GTE": {"tf": 1.7320508075688772}, "sqlglot.expressions.ILike": {"tf": 1.7320508075688772}, "sqlglot.expressions.ILikeAny": {"tf": 1.7320508075688772}, "sqlglot.expressions.IntDiv": {"tf": 1.7320508075688772}, "sqlglot.expressions.Is": {"tf": 1.7320508075688772}, "sqlglot.expressions.Kwarg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Like": {"tf": 1.7320508075688772}, "sqlglot.expressions.LikeAny": {"tf": 1.7320508075688772}, "sqlglot.expressions.LT": {"tf": 1.7320508075688772}, "sqlglot.expressions.LTE": {"tf": 1.7320508075688772}, "sqlglot.expressions.Mod": {"tf": 1.7320508075688772}, "sqlglot.expressions.Mul": {"tf": 1.7320508075688772}, "sqlglot.expressions.NEQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.SimilarTo": {"tf": 1.7320508075688772}, "sqlglot.expressions.Slice": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sub": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayOverlaps": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unary": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseNot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Not": 
{"tf": 1.7320508075688772}, "sqlglot.expressions.Paren": {"tf": 1.7320508075688772}, "sqlglot.expressions.Paren.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Neg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Aliases": {"tf": 1.7320508075688772}, "sqlglot.expressions.AtTimeZone": {"tf": 1.7320508075688772}, "sqlglot.expressions.Between": {"tf": 1.7320508075688772}, "sqlglot.expressions.Bracket": {"tf": 1.7320508075688772}, "sqlglot.expressions.Distinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.In": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeUnit": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1.7320508075688772}, "sqlglot.expressions.Interval": {"tf": 1.7320508075688772}, "sqlglot.expressions.IgnoreNulls": {"tf": 1.7320508075688772}, "sqlglot.expressions.RespectNulls": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 4.69041575982343}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func.sql_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func.sql_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1.7320508075688772}, "sqlglot.expressions.AggFunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.ParameterizedAgg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Abs": {"tf": 1.7320508075688772}, "sqlglot.expressions.Anonymous": {"tf": 1.7320508075688772}, "sqlglot.expressions.Hll": {"tf": 1.7320508075688772}, "sqlglot.expressions.ApproxDistinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.Array": {"tf": 1.7320508075688772}, "sqlglot.expressions.ToChar": {"tf": 1.7320508075688772}, "sqlglot.expressions.GenerateSeries": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayAgg": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayAll": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayAny": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayConcat": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayContains": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayContained": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayFilter": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayJoin": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArraySize": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArraySort": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArraySum": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Avg": {"tf": 1.7320508075688772}, "sqlglot.expressions.AnyValue": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case.when": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case.else_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Cast.is_type": {"tf": 1.7320508075688772}, "sqlglot.expressions.CastToStrType": {"tf": 1.7320508075688772}, "sqlglot.expressions.Collate": {"tf": 1.7320508075688772}, "sqlglot.expressions.TryCast": {"tf": 1.7320508075688772}, "sqlglot.expressions.Ceil": {"tf": 1.7320508075688772}, "sqlglot.expressions.Coalesce": {"tf": 1.7320508075688772}, "sqlglot.expressions.Concat": {"tf": 1.7320508075688772}, "sqlglot.expressions.SafeConcat": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.ConcatWs": {"tf": 1.7320508075688772}, "sqlglot.expressions.Count": {"tf": 1.7320508075688772}, "sqlglot.expressions.CountIf": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentDatetime": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentUser": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.DayOfWeek": {"tf": 1.7320508075688772}, "sqlglot.expressions.DayOfMonth": {"tf": 1.7320508075688772}, "sqlglot.expressions.DayOfYear": {"tf": 1.7320508075688772}, "sqlglot.expressions.WeekOfYear": {"tf": 1.7320508075688772}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1.7320508075688772}, "sqlglot.expressions.Extract": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateFromParts": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateStrToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateToDateStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateToDi": {"tf": 1.7320508075688772}, "sqlglot.expressions.Date": {"tf": 1.7320508075688772}, "sqlglot.expressions.Day": {"tf": 1.7320508075688772}, "sqlglot.expressions.Decode": {"tf": 1.7320508075688772}, "sqlglot.expressions.DiToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.Encode": {"tf": 1.7320508075688772}, "sqlglot.expressions.Exp": {"tf": 1.7320508075688772}, "sqlglot.expressions.Explode": {"tf": 1.7320508075688772}, "sqlglot.expressions.Floor": {"tf": 1.7320508075688772}, "sqlglot.expressions.FromBase64": {"tf": 1.7320508075688772}, "sqlglot.expressions.ToBase64": {"tf": 1.7320508075688772}, "sqlglot.expressions.Greatest": {"tf": 1.7320508075688772}, "sqlglot.expressions.GroupConcat": {"tf": 1.7320508075688772}, "sqlglot.expressions.Hex": {"tf": 1.7320508075688772}, "sqlglot.expressions.If": {"tf": 1.7320508075688772}, "sqlglot.expressions.Initcap": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONKeyValue": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONObject": {"tf": 1.7320508075688772}, "sqlglot.expressions.OpenJSONColumnDef": {"tf": 1.7320508075688772}, "sqlglot.expressions.OpenJSON": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONBContains": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONExtractScalar": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONBExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 
1.7320508075688772}, "sqlglot.expressions.JSONFormat": {"tf": 1.7320508075688772}, "sqlglot.expressions.Least": {"tf": 1.7320508075688772}, "sqlglot.expressions.Left": {"tf": 1.7320508075688772}, "sqlglot.expressions.Right": {"tf": 1.7320508075688772}, "sqlglot.expressions.Length": {"tf": 1.7320508075688772}, "sqlglot.expressions.Levenshtein": {"tf": 1.7320508075688772}, "sqlglot.expressions.Ln": {"tf": 1.7320508075688772}, "sqlglot.expressions.Log": {"tf": 1.7320508075688772}, "sqlglot.expressions.Log2": {"tf": 1.7320508075688772}, "sqlglot.expressions.Log10": {"tf": 1.7320508075688772}, "sqlglot.expressions.LogicalOr": {"tf": 1.7320508075688772}, "sqlglot.expressions.LogicalAnd": {"tf": 1.7320508075688772}, "sqlglot.expressions.Lower": {"tf": 1.7320508075688772}, "sqlglot.expressions.Map": {"tf": 1.7320508075688772}, "sqlglot.expressions.StarMap": {"tf": 1.7320508075688772}, "sqlglot.expressions.VarMap": {"tf": 1.7320508075688772}, "sqlglot.expressions.MatchAgainst": {"tf": 1.7320508075688772}, "sqlglot.expressions.Max": {"tf": 1.7320508075688772}, "sqlglot.expressions.MD5": {"tf": 1.7320508075688772}, "sqlglot.expressions.Min": {"tf": 1.7320508075688772}, "sqlglot.expressions.Month": {"tf": 1.7320508075688772}, "sqlglot.expressions.Nvl2": {"tf": 1.7320508075688772}, "sqlglot.expressions.Posexplode": {"tf": 1.7320508075688772}, "sqlglot.expressions.Pow": {"tf": 1.7320508075688772}, "sqlglot.expressions.PercentileCont": {"tf": 1.7320508075688772}, "sqlglot.expressions.PercentileDisc": {"tf": 1.7320508075688772}, "sqlglot.expressions.Quantile": {"tf": 1.7320508075688772}, "sqlglot.expressions.ApproxQuantile": {"tf": 1.7320508075688772}, "sqlglot.expressions.RangeN": {"tf": 1.7320508075688772}, "sqlglot.expressions.ReadCSV": {"tf": 1.7320508075688772}, "sqlglot.expressions.Reduce": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpLike": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpILike": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpSplit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Repeat": {"tf": 1.7320508075688772}, "sqlglot.expressions.Round": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowNumber": {"tf": 1.7320508075688772}, "sqlglot.expressions.SafeDivide": {"tf": 1.7320508075688772}, "sqlglot.expressions.SetAgg": {"tf": 1.7320508075688772}, "sqlglot.expressions.SHA": {"tf": 1.7320508075688772}, "sqlglot.expressions.SHA2": {"tf": 1.7320508075688772}, "sqlglot.expressions.SortArray": {"tf": 1.7320508075688772}, "sqlglot.expressions.Split": {"tf": 1.7320508075688772}, "sqlglot.expressions.Substring": {"tf": 1.7320508075688772}, "sqlglot.expressions.StandardHash": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrPosition": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrToTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrToUnix": {"tf": 1.7320508075688772}, "sqlglot.expressions.NumberToStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.FromBase": {"tf": 1.7320508075688772}, "sqlglot.expressions.Struct": {"tf": 1.7320508075688772}, "sqlglot.expressions.StructExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sum": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sqrt": {"tf": 1.7320508075688772}, "sqlglot.expressions.Stddev": {"tf": 1.7320508075688772}, "sqlglot.expressions.StddevPop": {"tf": 1.7320508075688772}, "sqlglot.expressions.StddevSamp": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.TimeToStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeToUnix": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeStrToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeStrToTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1.7320508075688772}, "sqlglot.expressions.Trim": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unhex": {"tf": 1.7320508075688772}, "sqlglot.expressions.UnixToStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.UnixToTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.Upper": {"tf": 1.7320508075688772}, "sqlglot.expressions.Variance": {"tf": 1.7320508075688772}, "sqlglot.expressions.VariancePop": {"tf": 1.7320508075688772}, "sqlglot.expressions.Week": {"tf": 1.7320508075688772}, "sqlglot.expressions.XMLTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Year": {"tf": 1.7320508075688772}, "sqlglot.expressions.Use": {"tf": 1.7320508075688772}, "sqlglot.expressions.Merge": {"tf": 1.7320508075688772}, "sqlglot.expressions.When": {"tf": 1.7320508075688772}, "sqlglot.expressions.NextValueFor": {"tf": 1.7320508075688772}, "sqlglot.expressions.maybe_parse": {"tf": 10.14889156509222}, "sqlglot.expressions.union": {"tf": 10}, "sqlglot.expressions.intersect": {"tf": 10}, "sqlglot.expressions.except_": {"tf": 10.04987562112089}, "sqlglot.expressions.select": {"tf": 9.797958971132712}, "sqlglot.expressions.from_": {"tf": 9.797958971132712}, "sqlglot.expressions.update": {"tf": 12}, "sqlglot.expressions.delete": {"tf": 9.433981132056603}, "sqlglot.expressions.insert": {"tf": 10.04987562112089}, "sqlglot.expressions.condition": {"tf": 14.071247279470288}, "sqlglot.expressions.and_": {"tf": 10.04987562112089}, "sqlglot.expressions.or_": {"tf": 10.04987562112089}, "sqlglot.expressions.not_": {"tf": 9}, "sqlglot.expressions.paren": {"tf": 8.48528137423857}, "sqlglot.expressions.to_identifier": {"tf": 5.744562646538029}, "sqlglot.expressions.to_interval": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 6.244997998398398}, "sqlglot.expressions.to_column": {"tf": 5.291502622129181}, "sqlglot.expressions.alias_": {"tf": 12.84523257866513}, "sqlglot.expressions.subquery": {"tf": 10}, "sqlglot.expressions.column": {"tf": 6.557438524302}, "sqlglot.expressions.cast": {"tf": 8.888194417315589}, "sqlglot.expressions.table_": {"tf": 6.557438524302}, "sqlglot.expressions.values": {"tf": 8.888194417315589}, "sqlglot.expressions.var": {"tf": 10.488088481701515}, "sqlglot.expressions.rename_table": {"tf": 4.898979485566356}, "sqlglot.expressions.convert": {"tf": 5.744562646538029}, "sqlglot.expressions.replace_children": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 8.54400374531753}, "sqlglot.expressions.table_name": {"tf": 9.695359714832659}, "sqlglot.expressions.replace_tables": {"tf": 10.583005244258363}, "sqlglot.expressions.replace_placeholders": {"tf": 12.489995996796797}, "sqlglot.expressions.expand": {"tf": 14.247806848775006}, "sqlglot.expressions.func": {"tf": 12.884098726725126}, "sqlglot.expressions.true": {"tf": 1.7320508075688772}, "sqlglot.expressions.false": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.null": {"tf": 1.7320508075688772}, "sqlglot.generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 8.06225774829855}, "sqlglot.generator.Generator.__init__": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 5.477225575051661}, "sqlglot.generator.Generator.unsupported": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sep": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.seg": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pad_comment": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.wrap": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.no_identify": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.normalize_func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.indent": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cache_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.column_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.columnposition_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.compresscolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.createable_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.create_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.clone_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.describe_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.with_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cte_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bytestring_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.rawstring_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.datatypesize_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.directory_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.delete_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.drop_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.except_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.except_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.filter_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.hint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.index_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.inputoutputformat_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.national_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.partition_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.properties_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.root_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.with_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.property_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lockingproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.withdataproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.insert_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.intersect_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.onconflict_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.returning_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.table_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.update_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.var_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.into_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.from_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.group_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.having_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.join_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.limit_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.offset_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.setitem_sql": {"tf": 
1.7320508075688772}, "sqlglot.generator.Generator.set_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pragma_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lock_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.literal_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.null_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.order_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sort_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.offset_limit_modifiers": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.after_having_modifiers": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.after_limit_modifiers": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.select_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.schema_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.schema_columns_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.star_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.union_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.union_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.where_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.window_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.windowspec_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.between_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.all_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.any_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.exists_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.case_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.nextvaluefor_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.extract_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.trim_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.safeconcat_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.check_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.if_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.matchagainst_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.jsonkeyvalue_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.jsonobject_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.openjsoncolumndef_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.openjson_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.in_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.interval_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.return_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.reference_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.paren_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.neg_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.not_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.alias_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.add_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.and_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.connector_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cast_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.collate_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.command_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.comment_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mergetreettlaction_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mergetreettl_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.commit_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.safedpipe_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.div_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.overlaps_sql": {"tf": 
1.7320508075688772}, "sqlglot.generator.Generator.distance_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dot_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.eq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.escape_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.glob_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.gt_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.gte_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ilikeany_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.is_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.like_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.likeany_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lt_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lte_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mod_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mul_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.neq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.or_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.slice_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sub_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.use_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.binary": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.format_args": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.text_width": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.format_time": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.expressions": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.naked_property": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.set_operation": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tag_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.token_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.when_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.merge_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tochar_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dictproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dictrange_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dictsubproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.oncluster_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.cached_generator": {"tf": 1.7320508075688772}, "sqlglot.helper": {"tf": 1.7320508075688772}, "sqlglot.helper.AutoName": {"tf": 2.449489742783178}, "sqlglot.helper.seq_get": {"tf": 3.3166247903554}, 
"sqlglot.helper.ensure_list": {"tf": 4.795831523312719}, "sqlglot.helper.ensure_collection": {"tf": 5.196152422706632}, "sqlglot.helper.csv": {"tf": 5.291502622129181}, "sqlglot.helper.subclasses": {"tf": 5.744562646538029}, "sqlglot.helper.apply_index_offset": {"tf": 5.830951894845301}, "sqlglot.helper.camel_to_snake_case": {"tf": 2.23606797749979}, "sqlglot.helper.while_changing": {"tf": 5.291502622129181}, "sqlglot.helper.tsort": {"tf": 4.795831523312719}, "sqlglot.helper.open_file": {"tf": 1.7320508075688772}, "sqlglot.helper.csv_reader": {"tf": 5.477225575051661}, "sqlglot.helper.find_new_name": {"tf": 5.291502622129181}, "sqlglot.helper.name_sequence": {"tf": 1.7320508075688772}, "sqlglot.helper.object_to_dict": {"tf": 1.7320508075688772}, "sqlglot.helper.split_num_words": {"tf": 12.649110640673518}, "sqlglot.helper.is_iterable": {"tf": 8.94427190999916}, "sqlglot.helper.flatten": {"tf": 11.224972160321824}, "sqlglot.helper.dict_depth": {"tf": 11.489125293076057}, "sqlglot.helper.first": {"tf": 2.449489742783178}, "sqlglot.helper.case_sensitive": {"tf": 1.7320508075688772}, "sqlglot.helper.should_identify": {"tf": 6.324555320336759}, "sqlglot.lineage": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.__init__": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.walk": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.to_html": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 6.928203230275509}, "sqlglot.lineage.LineageHTML": {"tf": 2.6457513110645907}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 13.152946437965905}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 4.123105625617661}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.ensure_bool_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 10.198039027185569}, "sqlglot.optimizer.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 10.344080432788601}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 4.58257569495584}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 12.727922061357855}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 11.180339887498949}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1.7320508075688772}, 
"sqlglot.optimizer.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalize": {"tf": 10.535653752852738}, "sqlglot.optimizer.normalize.normalized": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 9.591663046625438}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.23606797749979}, "sqlglot.optimizer.normalize_identifiers": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 9.9498743710662}, "sqlglot.optimizer.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 7.874007874011811}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 7.745966692414834}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 10.14889156509222}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 10.44030650891055}, "sqlglot.optimizer.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 13.674794331177344}, "sqlglot.optimizer.qualify_columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 11.832159566199232}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 2}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_columns.Resolver.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 4.795831523312719}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 2}, "sqlglot.optimizer.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 13.341664064126334}, "sqlglot.optimizer.scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 9.539392014169456}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 
1.7320508075688772}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find": {"tf": 5.744562646538029}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 5.656854249492381}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 5.477225575051661}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 4.47213595499958}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 4.47213595499958}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 3.4641016151377544}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 3.872983346207417}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 4}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 3.4641016151377544}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 4.898979485566356}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 13.601470508735444}, "sqlglot.optimizer.scope.build_scope": {"tf": 4.69041575982343}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 5.385164807134504}, "sqlglot.optimizer.simplify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify": {"tf": 9.643650760992955}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 2.6457513110645907}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2.8284271247461903}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.always_true": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.is_false": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.is_null": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.extract_date": 
{"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1.7320508075688772}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 9.899494936611665}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1.7320508075688772}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1.7320508075688772}, "sqlglot.parser": {"tf": 1.7320508075688772}, "sqlglot.parser.parse_var_map": {"tf": 1.7320508075688772}, "sqlglot.parser.parse_like": {"tf": 1.7320508075688772}, "sqlglot.parser.binary_range_parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 4.58257569495584}, "sqlglot.parser.Parser.__init__": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.reset": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse": {"tf": 5.291502622129181}, "sqlglot.parser.Parser.parse_into": {"tf": 5.744562646538029}, "sqlglot.parser.Parser.check_errors": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.raise_error": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 5.744562646538029}, "sqlglot.parser.Parser.validate_expression": {"tf": 5.291502622129181}, "sqlglot.planner": {"tf": 1.7320508075688772}, "sqlglot.planner.Plan": {"tf": 1.7320508075688772}, "sqlglot.planner.Plan.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.Step": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.from_expression": {"tf": 7.681145747868608}, "sqlglot.planner.Step.add_dependency": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.to_s": {"tf": 1.7320508075688772}, "sqlglot.planner.Scan": {"tf": 1.7320508075688772}, "sqlglot.planner.Scan.from_expression": {"tf": 7.681145747868608}, "sqlglot.planner.Join": {"tf": 1.7320508075688772}, "sqlglot.planner.Join.from_joins": {"tf": 1.7320508075688772}, "sqlglot.planner.Aggregate": {"tf": 1.7320508075688772}, "sqlglot.planner.Sort": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.from_expression": {"tf": 7.681145747868608}, "sqlglot.schema.Schema": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 5.291502622129181}, "sqlglot.schema.Schema.column_names": {"tf": 6.082762530298219}, "sqlglot.schema.Schema.get_column_type": {"tf": 6.082762530298219}, "sqlglot.schema.Schema.supported_table_args": {"tf": 2.449489742783178}, "sqlglot.schema.Schema.empty": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema": {"tf": 4}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.nested_get": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 7.3484692283495345}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.copy": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 5.291502622129181}, "sqlglot.schema.MappingSchema.column_names": {"tf": 6.082762530298219}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 6.082762530298219}, "sqlglot.schema.ensure_schema": 
{"tf": 1.7320508075688772}, "sqlglot.schema.ensure_column_mapping": {"tf": 1.7320508075688772}, "sqlglot.schema.flatten_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_get": {"tf": 5.830951894845301}, "sqlglot.schema.nested_set": {"tf": 12.649110640673518}, "sqlglot.serde": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1.7320508075688772}, "sqlglot.serde.load": {"tf": 2.23606797749979}, "sqlglot.time": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 7.681145747868608}, "sqlglot.tokens": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DASH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.STAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EQ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AND": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CARET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HASH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LT_AT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AT_GT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DAMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SPACE": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATABASE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NATIONAL_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RAW_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UTINYINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.USMALLINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UBIGINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT128": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UINT128": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT256": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UINT256": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIGDECIMAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATETIME64": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT4RANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT4MULTIRANGE": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.INT8RANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT8MULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NUMRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NUMMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TSRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TSMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TSTZRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TSTZMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATERANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATEMULTIRANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.XML": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ENUM": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ANY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ASC": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CASE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_USER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DESC": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DICTIONARY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DIV": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DROP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.END": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FOR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FROM": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IF": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ILIKE_ANY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INNER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTO": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JOIN_MARKER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.KEEP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LIKE": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.LIKE_ANY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LOAD": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LOCK": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MOD": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NEXT_VALUE_FOR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OVER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OVERLAPS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PRAGMA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RETURNING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SETTINGS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SOME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.7320508075688772}, 
"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TOP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.THEN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.USE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.USING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.__init__": {"tf": 6.6332495807108}, "sqlglot.tokens.Token.number": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.string": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.identifier": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.var": {"tf": 2.23606797749979}, "sqlglot.tokens.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 2.23606797749979}, "sqlglot.tokens.Tokenizer.peek": {"tf": 1.7320508075688772}, "sqlglot.transforms": {"tf": 1.7320508075688772}, "sqlglot.transforms.unalias_group": {"tf": 9.327379053088816}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 5.0990195135927845}, "sqlglot.transforms.eliminate_qualify": {"tf": 3.1622776601683795}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.unnest_to_explode": {"tf": 2}, "sqlglot.transforms.explode_to_unnest": {"tf": 2}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_within_group_for_percentiles": {"tf": 1.7320508075688772}, "sqlglot.transforms.add_recursive_cte_column_names": {"tf": 1.7320508075688772}, "sqlglot.transforms.epoch_cast_to_ts": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 5}, "sqlglot.trie": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 9.539392014169456}, "sqlglot.trie.in_trie": {"tf": 13.228756555322953}}, "df": 1866, "s": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 66, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 5.5677643628300215}, "sqlglot.pretty": {"tf": 1}, "sqlglot.parse": {"tf": 2.23606797749979}, "sqlglot.parse_one": {"tf": 2.23606797749979}, "sqlglot.transpile": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 6.48074069840786}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.executor": {"tf": 6.324555320336759}, "sqlglot.executor.execute": {"tf": 2}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 2}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2.23606797749979}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, 
"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 134, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 6.557438524302}, "sqlglot.schema": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 5}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 3}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 4.242640687119285}, "sqlglot.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2.23606797749979}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.449489742783178}, "sqlglot.optimizer.normalize.normalize": {"tf": 2}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.23606797749979}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, 
"sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify": {"tf": 2.23606797749979}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}}, "df": 58, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3.4641016151377544}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}}, "df": 12, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 3.3166247903554}}, "df": 1}}}}}}, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 3}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "t": {"docs": {"sqlglot.helper.split_num_words": {"tf": 2.6457513110645907}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 5}}, "c": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, 
"df": 1}}}}}}, "y": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 3}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}}, "df": 3}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Func": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 2}}}}, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, 
"sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}}, "df": 60}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.not_": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}}, "df": 7}}}}}}}, "b": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.expressions": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 2}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "t": {"docs": {"sqlglot.expressions.Func": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 
0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 13}}}, "y": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 12, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.expressions.expand": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot": {"tf": 2.6457513110645907}}, "df": 1}}}, "m": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 5, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 
0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 5}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 3}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}}, "df": 6}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 32, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}, "w": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 
0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 15, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 12, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.diff": {"tf": 7.937253933193772}, "sqlglot.diff.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 2}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 24, "s": {"docs": {"sqlglot.expressions.expand": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}}, "df": 8}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}}, "df": 4, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 5, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 
1.4142135623730951}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}}, "df": 18, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.6457513110645907}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 8}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}, "b": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {"sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}, "f": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 3.7416573867739413}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dataframe": {"tf": 2.6457513110645907}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.generator.Generator.generate": {"tf": 1.7320508075688772}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1.7320508075688772}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.string": {"tf": 1.4142135623730951}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 108, "s": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 
1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 26}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.6457513110645907}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 7, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.7320508075688772}}, "df": 3}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.simplify.simplify": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.executor.execute": {"tf": 1}}, "df": 3}, "y": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 
1}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 4.242640687119285}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 10}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 22}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}}, "df": 4}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, 
"df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 6.164414002968976}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.assert_is": {"tf": 2}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.select": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.lateral": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 3.1622776601683795}, "sqlglot.expressions.Select.where": {"tf": 2}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Select.distinct": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lock": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.hint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Paren.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 2.449489742783178}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 3.1622776601683795}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.6457513110645907}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 102, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 4}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, 
"sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 11, "s": {"docs": {"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 3}}, "|": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 6}}, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 5, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 2, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}}}}, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, 
"df": 0, "e": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}}}}}}}}, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.diff": {"tf": 5.916079783099616}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 2}}, "df": 48, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}}, "df": 4}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}}}, 
"c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 2}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 22}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 9, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.dataframe": {"tf": 4.242640687119285}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 21, "s": {"docs": {"sqlglot.schema.Schema": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 3.605551275463989}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 4.242640687119285}}, "df": 1, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 
1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.build_scope": {"tf": 2}}, "df": 35, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 3.605551275463989}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 4}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "r": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 4}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 8}}, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 23}}, "w": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "w": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.replace": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 4.795831523312719}, "sqlglot.executor": {"tf": 4.69041575982343}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot": {"tf": 3.7416573867739413}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 3.4641016151377544}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 7.54983443527075}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 4.58257569495584}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 2}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Dot.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Paren.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 2.23606797749979}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.paren": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}, "sqlglot.helper.AutoName": {"tf": 1.4142135623730951}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 2.23606797749979}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, 
"sqlglot.trie.in_trie": {"tf": 2.23606797749979}}, "df": 167, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 3}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 3.7416573867739413}, "sqlglot.executor": {"tf": 4.898979485566356}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, 
"sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_collection": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 98, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 3}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, 
"df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.is_iterable": {"tf": 2.23606797749979}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.first": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.helper.flatten": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 13, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3}}}}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "n": {"docs": {"sqlglot": {"tf": 4.242640687119285}, "sqlglot.schema": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3.605551275463989}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 
1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3}, "sqlglot.diff": {"tf": 8.12403840463596}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 3}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, 
"sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 3}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 2.6457513110645907}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, 
"sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 2.449489742783178}}, "df": 168, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 45, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 45, "s": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 2}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": 
{"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 60, "s": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 6}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 6}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 11}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 2.8284271247461903}}, "df": 5, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 3}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, 
"sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 23}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 2}}}}}}, "t": {"6": {"4": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 2.23606797749979}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 11, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, 
"sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 44}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 5, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.6457513110645907}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "\u2019": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.executor": {"tf": 1}, 
"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}}, "df": 3}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 2}}}}}}}, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 6}}}}}, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}}, "df": 3, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, 
"sqlglot.generator.Generator": {"tf": 1}}, "df": 22, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}}}, "x": {"docs": {"sqlglot.helper.seq_get": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 3}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 6}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.is_iterable": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.condition": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}}, "df": 5}, "r": {"docs": {"sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, 
"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 6, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 4.358898943540674}, "sqlglot.dataframe": {"tf": 4.47213595499958}, "sqlglot.dialects": {"tf": 2}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 44, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 3}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 1}}}}}}}}}}, "d": {"1": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}}, "df": 1}, "2": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {"sqlglot": {"tf": 4.69041575982343}, "sqlglot.dataframe": {"tf": 3}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": 
{"docs": {"sqlglot": {"tf": 3}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 2}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1.4142135623730951}}, "df": 32, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 
1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}}, "df": 31}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}}, "df": 2}}}, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 25, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.to_identifier": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2}}, "s": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}}, "f": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": 
{"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 4.69041575982343}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.using": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.select": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Dot.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Paren.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1.7320508075688772}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 162}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "\u2019": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "m": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}}, "a": {"0": {"docs": {"sqlglot.helper.name_sequence": {"tf": 1}}, "df": 1}, "1": {"docs": {"sqlglot.helper.name_sequence": {"tf": 1}}, "df": 1}, "2": {"docs": {"sqlglot.helper.name_sequence": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 7.745966692414834}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 2.449489742783178}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": 
{"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 8.94427190999916}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.diff": {"tf": 2.449489742783178}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.executor": {"tf": 8.660254037844387}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.is_number": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.is_int": {"tf": 
1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 2}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.Paren.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 
1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 2.23606797749979}, "sqlglot.expressions.to_column": {"tf": 2}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 2}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 2.449489742783178}, "sqlglot.helper.ensure_collection": {"tf": 2.23606797749979}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1.7320508075688772}, "sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}, "sqlglot.helper.name_sequence": {"tf": 1.7320508075688772}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 2}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 2}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.8284271247461903}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2.8284271247461903}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 3.1622776601683795}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.8284271247461903}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 2}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 3}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 2}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 4.242640687119285}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 3.4641016151377544}, "sqlglot.parser.Parser": {"tf": 2}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 3.4641016151377544}, "sqlglot.planner.Scan.from_expression": {"tf": 3.4641016151377544}, "sqlglot.planner.SetOperation.from_expression": {"tf": 3.4641016151377544}, "sqlglot.schema.Schema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 2.6457513110645907}, "sqlglot.trie.in_trie": {"tf": 2.449489742783178}}, "df": 239, "n": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, 
"sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, 
"sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 136, "d": {"docs": {"sqlglot": {"tf": 5.196152422706632}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 7.0710678118654755}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.executor": {"tf": 7.0710678118654755}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": 
{"tf": 2.449489742783178}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.6457513110645907}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2.8284271247461903}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 120}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 13}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.func": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {"sqlglot.dataframe": {"tf": 
1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 28, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "l": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.schema.Schema.add_table": 
{"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 16}}, "l": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 2}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": 
{"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 70, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 2.8284271247461903}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}}, "df": 17, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6}, "s": {"docs": {"sqlglot.expressions.Func": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 5}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.helper.find_new_name": {"tf": 1}}, "df": 3, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}}, "df": 25}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "g": {"docs": {"sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 5}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 
1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 4.47213595499958}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 60, "n": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}}, "df": 1}}, "g": {"docs": {"sqlglot.expressions.Expression": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.append": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.set": {"tf": 2}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, 
"sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 12, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 
1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 2}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, 
"sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 191}}}}}}, "s": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.replace_placeholders": 
{"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 11}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}}, "df": 4}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}}, "df": 2}}}}}}}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 4}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"sqlglot.expressions.func": {"tf": 1.4142135623730951}}, "df": 1, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 5}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}}, "df": 3}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot": {"tf": 2}}, "df": 1, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}}, "df": 
1}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7}, "s": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}}, "df": 10}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}}, "df": 17, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {"sqlglot": {"tf": 4.47213595499958}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 3}, 
"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 4}, "sqlglot.executor": {"tf": 4.242640687119285}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 2.6457513110645907}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.6457513110645907}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 2}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 2}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.expand": 
{"tf": 2.23606797749979}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.23606797749979}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.8284271247461903}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 2}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.6457513110645907}, "sqlglot.planner.Step.from_expression": {"tf": 2}, "sqlglot.planner.Scan.from_expression": {"tf": 2}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 108, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 21, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}, "e": 
{"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}}, "df": 15, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 5}}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 3}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "j": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 2}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 3}}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 3}}}}, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, 
"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 4}, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 4}}}}}}}, "e": {"docs": {"sqlglot.dataframe": {"tf": 3.872983346207417}}, "df": 1}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}}, "df": 4, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}, "x": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}}, "df": 2, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 3}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20, "s": {"docs": 
{"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 2}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 14, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 4}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 2}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}}, "df": 3}}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}}, "df": 17, "t": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 
1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.executor": {"tf": 3}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 
1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 64, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 7, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 7.745966692414834}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 28, "s": {"docs": {"sqlglot.diff": {"tf": 8.12403840463596}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 12}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}}, "df": 10, "e": {"docs": {"sqlglot.dataframe": {"tf": 2}, 
"sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2.23606797749979}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 12}}, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.7320508075688772}}, "df": 2, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}}, "df": 26, "d": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 3}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, 
"sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 23}}}}}}}}}}}, "w": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe": {"tf": 3}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.helper.split_num_words": {"tf": 2}}, "df": 3, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.number": {"tf": 1.4142135623730951}}, "df": 52}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.null": {"tf": 1}}, "df": 5}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.23606797749979}, 
"sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Star.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Dot.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Paren.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Alias.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Func": {"tf": 2.23606797749979}, "sqlglot.expressions.Cast.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 1.7320508075688772}, "sqlglot.expressions.var": {"tf": 1.7320508075688772}, "sqlglot.expressions.rename_table": {"tf": 2}, "sqlglot.expressions.table_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.7320508075688772}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}}, "df": 73, "s": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 2}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}}, "df": 37}, "d": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "\u00ef": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, 
"sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 1.7320508075688772}}, "df": 63, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 7, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, 
"df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.find_ancestor": {"tf": 1}}, "df": 1}}}}}}, "^": {"2": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}, "docs": {}, "df": 0}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 8, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 6}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, 
"sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 6}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}}, "df": 6}}}, "v": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 
1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 44}}}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}}, "df": 3, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller": {"tf": 1.4142135623730951}}, "df": 2}, "s": {"docs": {"sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 22}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": 
{"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 5, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3.4641016151377544}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3.4641016151377544}, "sqlglot.executor.python.Python.Generator": {"tf": 3.4641016151377544}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 3.4641016151377544}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 52, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "s": {"docs": {"sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}}, "df": 3}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 9}, "s": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 
2}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {"sqlglot.expressions.Delete.delete": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 2.23606797749979}}, "df": 4}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}}, "df": 5}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}}, "df": 1}}}}}}}}}}, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.copy": {"tf": 1}}, "df": 1}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.simplify_not": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": 
{"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}}, "df": 5}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.dialects": {"tf": 3.1622776601683795}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1.7320508075688772}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 89, "s": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 2}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 9}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 11, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}}, "df": 2}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": 
{"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "j": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 2.449489742783178}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 2}}}, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 2}, "sqlglot.time.format_time": {"tf": 1}}, "df": 13}}}}}}, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}}, "df": 3}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": 
{}, "df": 0, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}}, "df": 6}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 4}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2}}}}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 6, "n": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 6, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}}, "df": 4}}}}, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 6, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}}, "df": 5}, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 2}}, "t": {"docs": {"sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": 
{"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 2, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}}, "df": 8, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 3}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 8}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 4.58257569495584}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}}, "df": 9}}}}}}, "g": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.Scan.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.23606797749979}}, "df": 5}, "y": {"docs": {"sqlglot.expressions.to_interval": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {"sqlglot.dataframe": {"tf": 2.8284271247461903}}, "df": 1, "s": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 5}}, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": 
{"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "b": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2.449489742783178}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2}}, "df": 12, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 4}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "d": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3.872983346207417}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": {"tf": 2}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 2}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 2}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 2}, "sqlglot.expressions.Join.on": {"tf": 2}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.select": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": 
{"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 2}, "sqlglot.expressions.Star.output_name": {"tf": 2}, "sqlglot.expressions.Dot.output_name": {"tf": 2}, "sqlglot.expressions.Paren.output_name": {"tf": 2}, "sqlglot.expressions.Alias.output_name": {"tf": 2}, "sqlglot.expressions.Cast.output_name": {"tf": 2}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 93, "r": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1.4142135623730951}, 
"sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 26, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 21}}}}}, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, 
"sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}}, "df": 29}, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 6}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 8, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}}, "df": 4}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.paren": {"tf": 1}}, "df": 1, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 9, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.paren": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}}, "df": 5, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "/": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": 
{"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 44}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 22}, "i": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "e": {"docs": {"sqlglot.diff.diff": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, 
"sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 10}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 23, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}}, "df": 25}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "/": 
{"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 6}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.helper.name_sequence": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions": {"tf": 1}}, "df": 3}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2}, "sqlglot.planner.Scan.from_expression": {"tf": 2}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 7}}}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 12}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 4}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 47}, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 22}}, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 2}}, "df": 2}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.449489742783178}}, "df": 1}}, "o": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 3}}}}}}}, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, 
"o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 4.898979485566356}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 7, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}}, "df": 3, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "p": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "f": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, 
"sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 30, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 2}}, "df": 1}}}}}}}}}, "n": {"docs": {"sqlglot.executor": {"tf": 2.8284271247461903}}, "df": 1, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, 
"sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}}, "df": 29}, "y": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 3}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Delete.returning": {"tf": 1}}, "df": 1, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 2}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.pop": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "h": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 
1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 21, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.transpile": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}}, "df": 7, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 12}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 6}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, 
"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 3}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}}, "df": 5, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, 
"sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.872983346207417}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}}, "df": 74, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}}, "df": 6}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Func": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 1.7320508075688772}}, "df": 69}}, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": 
{"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 7}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {"sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2.8284271247461903}, "sqlglot.trie.in_trie": {"tf": 3.4641016151377544}}, "df": 3, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {"sqlglot": {"tf": 4.358898943540674}, "sqlglot.pretty": {"tf": 1}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.transpile": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 3.7416573867739413}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3}, 
"sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3}, "sqlglot.diff": {"tf": 8.48528137423857}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 6.708203932499369}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 3}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.set": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.delete": {"tf": 2}, "sqlglot.expressions.Delete.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.returning": {"tf": 2}, "sqlglot.expressions.Insert.with_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Join.on": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.using": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Union.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.order_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.sort_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lateral": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.join": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.having": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.hint": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 2.449489742783178}, "sqlglot.expressions.maybe_parse": {"tf": 2.6457513110645907}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.insert": {"tf": 2.6457513110645907}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, "sqlglot.expressions.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.not_": {"tf": 2.23606797749979}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 2.8284271247461903}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 2.23606797749979}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_placeholders": {"tf": 1.7320508075688772}, "sqlglot.expressions.expand": {"tf": 2}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 3}, "sqlglot.generator.Generator.generate": {"tf": 1.7320508075688772}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.7320508075688772}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 3.3166247903554}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 
1.4142135623730951}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 2}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.Scan.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 2.449489742783178}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 203, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 2.8284271247461903}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, 
"sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 26}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 23}, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 3}}}}}}}, "o": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "p": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 2.23606797749979}}, "df": 2, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 3}}}}}}}}}, "t": {"docs": {}, 
"df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 5}, "sqlglot.schema": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 2}, "sqlglot.parse_one": {"tf": 2.6457513110645907}, "sqlglot.transpile": {"tf": 4}, "sqlglot.dataframe": {"tf": 4.358898943540674}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 3}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 2.6457513110645907}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 
3.605551275463989}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3.605551275463989}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3.605551275463989}, "sqlglot.diff": {"tf": 15.066519173319364}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.diff": {"tf": 3.7416573867739413}, "sqlglot.diff.ChangeDistiller": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 8.48528137423857}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 3.605551275463989}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.set": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.find_all": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.intersect": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.except_": {"tf": 2.449489742783178}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Delete.returning": {"tf": 2.449489742783178}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Insert.with_": {"tf": 3.4641016151377544}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 2.8284271247461903}, "sqlglot.expressions.Join.using": {"tf": 3}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 3.4641016151377544}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Union.select": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.order_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.sort_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.offset": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.select": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.lateral": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.join": {"tf": 3.4641016151377544}, "sqlglot.expressions.Select.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.having": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.distinct": {"tf": 2}, "sqlglot.expressions.Select.ctas": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lock": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.hint": {"tf": 2.23606797749979}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Dot.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Paren.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 2.8284271247461903}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 3.1622776601683795}, "sqlglot.expressions.union": {"tf": 3}, "sqlglot.expressions.intersect": {"tf": 3}, "sqlglot.expressions.except_": {"tf": 3}, "sqlglot.expressions.select": {"tf": 3}, "sqlglot.expressions.from_": {"tf": 3.3166247903554}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.insert": {"tf": 3.1622776601683795}, "sqlglot.expressions.condition": {"tf": 3}, "sqlglot.expressions.and_": {"tf": 2.23606797749979}, "sqlglot.expressions.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.not_": {"tf": 2.449489742783178}, "sqlglot.expressions.paren": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_identifier": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 2}, "sqlglot.expressions.alias_": {"tf": 3}, "sqlglot.expressions.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 2}, "sqlglot.expressions.rename_table": {"tf": 2}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 3}, "sqlglot.generator.Generator": {"tf": 3.605551275463989}, "sqlglot.generator.Generator.generate": {"tf": 2.23606797749979}, "sqlglot.helper.AutoName": {"tf": 1.4142135623730951}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_collection": {"tf": 1.7320508075688772}, "sqlglot.helper.csv": {"tf": 1.7320508075688772}, "sqlglot.helper.subclasses": {"tf": 2.23606797749979}, "sqlglot.helper.apply_index_offset": {"tf": 2.8284271247461903}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2.6457513110645907}, "sqlglot.helper.is_iterable": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 2.449489742783178}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 2.449489742783178}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}, "sqlglot.optimizer.optimizer.optimize": {"tf": 3}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 2}, "sqlglot.parser.Parser.parse": {"tf": 2}, "sqlglot.parser.Parser.parse_into": {"tf": 2.8284271247461903}, 
"sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.from_expression": {"tf": 2.449489742783178}, "sqlglot.planner.Scan.from_expression": {"tf": 2.449489742783178}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.449489742783178}, "sqlglot.schema.Schema.add_table": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.column_names": {"tf": 2}, "sqlglot.schema.Schema.get_column_type": {"tf": 2.449489742783178}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2.449489742783178}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.column_names": {"tf": 2}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 2.449489742783178}, "sqlglot.schema.nested_get": {"tf": 2.23606797749979}, "sqlglot.schema.nested_set": {"tf": 2.449489742783178}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 3.605551275463989}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 2.6457513110645907}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 2}, "sqlglot.trie.new_trie": {"tf": 2.23606797749979}, "sqlglot.trie.in_trie": {"tf": 2.23606797749979}}, "df": 255, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 2}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 5, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 16}, "m": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 6}, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, 
"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 8}, "i": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 9}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 5.744562646538029}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, 
"sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 92}, "n": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 4}}, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot": {"tf": 4.58257569495584}, "sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 6.244997998398398}, "sqlglot.diff.diff": {"tf": 3.4641016151377544}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, 
"sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, 
"sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 3}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 183}, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 2}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": 
{"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 5}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}}, "df": 3, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.diff": {"tf": 7.280109889280518}, "sqlglot.diff.diff": {"tf": 2.8284271247461903}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 13, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 3.3166247903554}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Table.parts": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 2.6457513110645907}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 2.8284271247461903}, "sqlglot.expressions.to_column": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 2.449489742783178}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2.449489742783178}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 
2.6457513110645907}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 2.449489742783178}, "sqlglot.schema.Schema.column_names": {"tf": 2}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2.449489742783178}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2.449489742783178}, "sqlglot.schema.MappingSchema.column_names": {"tf": 2}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 48, "s": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 22}, "|": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}}, "df": 2}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 3}, "n": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Tag": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": 
{"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.helper.is_iterable": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 33, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 
1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.time.format_time": {"tf": 2.23606797749979}}, "df": 7, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"1": {"docs": {"sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}}, "df": 2}, "2": {"docs": {"sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.8284271247461903}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}}, "df": 4}, "docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.replace": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}}, "df": 32}}, "w": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 5}, "sqlglot.executor": {"tf": 
1}}, "df": 5}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 3.1622776601683795}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.23606797749979}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 2}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 30, "s": {"docs": {"sqlglot.dataframe": {"tf": 5.656854249492381}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 22}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}}}}, "p": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1, "h": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}}, "df": 3}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 4, "[": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.schema.nested_get": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}}, "df": 3, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 10, "r": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.schema": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 7, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 9}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}}, "df": 
1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 3}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 14, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1}}, "df": 3}}, "[": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": 
{"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}}, "df": 48}}}}, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}}, "df": 46}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 
1}}, "df": 3}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {"sqlglot": {"tf": 4.123105625617661}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.diff": {"tf": 3.7416573867739413}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2.8284271247461903}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 
1}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 2.6457513110645907}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.8284271247461903}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 2}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.8284271247461903}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2.8284271247461903}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 102, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 22, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "g": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"3": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"4": {"3": {"3": {"9": {"2": {"3": {"0": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 5}}}}}}}, "f": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.parse": {"tf": 1}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 10.488088481701515}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 4.69041575982343}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, 
"sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_children": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 2}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 2}, "sqlglot.parser.Parser.parse": {"tf": 2}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, 
"sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 167, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Select.offset": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 2.23606797749979}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Literal.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Star.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Dot.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Paren.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Alias.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Cast.output_name": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 37, "s": {"docs": 
{"sqlglot.optimizer.scope.Scope.selects": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 7}}}, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}}, "df": 3}}, "n": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 4.795831523312719}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 3}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, 
"sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 55, "e": {"docs": {"sqlglot": {"tf": 4.242640687119285}, "sqlglot.parse": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 2}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 2}, "sqlglot.expressions.Identifier.output_name": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 2}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 2}, "sqlglot.expressions.Star.output_name": {"tf": 2}, "sqlglot.expressions.Dot.output_name": {"tf": 2}, "sqlglot.expressions.Paren.output_name": {"tf": 2}, "sqlglot.expressions.Alias.output_name": {"tf": 2}, "sqlglot.expressions.Cast.output_name": {"tf": 2}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 
1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 66, "s": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 3}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, 
"sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 72}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 3}}, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.expressions.Select.distinct": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 13, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 6}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 
1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 50, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 22}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 5, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 
1.4142135623730951}}, "df": 1}}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.insert": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 2}}, "df": 1}, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}}, "df": 2}}}, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.schema": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 11, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 4}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 2}}, "df": 3}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, 
"sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 26, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}, "s": {"docs": {"sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, 
"sqlglot.tokens.TokenType": {"tf": 1}}, "df": 6}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 4}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 4}}}, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 12}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "x": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1, "p": {"docs": {"sqlglot": {"tf": 4.123105625617661}, "sqlglot.dialects": {"tf": 3.3166247903554}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 19, "r": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 3.4641016151377544}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, 
"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 2}, "sqlglot.expressions.Expression.pop": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.where": {"tf": 2.23606797749979}, "sqlglot.expressions.Delete.returning": {"tf": 2.23606797749979}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 2.6457513110645907}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 2.23606797749979}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.6457513110645907}, "sqlglot.expressions.Union.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.order_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.sort_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.offset": {"tf": 
2.449489742783178}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.having": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.distinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 2}, "sqlglot.expressions.Select.lock": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.hint": {"tf": 2}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Dot.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Paren.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 3}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 2.6457513110645907}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.paren": {"tf": 2.23606797749979}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 2.449489742783178}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 2.23606797749979}, "sqlglot.expressions.replace_placeholders": {"tf": 2}, "sqlglot.expressions.expand": {"tf": 2.23606797749979}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.7320508075688772}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 2}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.449489742783178}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 2.8284271247461903}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2.8284271247461903}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 3}, 
"sqlglot.optimizer.normalize.normalize": {"tf": 2.449489742783178}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 2.449489742783178}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope": {"tf": 2}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 2}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 3}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 2}, "sqlglot.optimizer.simplify.simplify": {"tf": 2.6457513110645907}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.6457513110645907}, "sqlglot.parser.Parser.parse_into": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.expression": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.validate_expression": {"tf": 2.23606797749979}, "sqlglot.planner.Step.from_expression": {"tf": 2.6457513110645907}, "sqlglot.planner.Scan.from_expression": {"tf": 2.6457513110645907}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.6457513110645907}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 156, "s": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.using": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lateral": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.having": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 2.23606797749979}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 
1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 2}, "sqlglot.expressions.or_": {"tf": 2}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 97}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.iter_expressions": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}}, "df": 3, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.expand": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": 
{}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, 
"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 96, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 18}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 7}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, 
"e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 27, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 7}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.dialects": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}}, "df": 21}}}}}, "t": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 7}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.449489742783178}, 
"sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.449489742783178}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 2.449489742783178}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 30, "s": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 29}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 42}}}}}}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.context.Context": 
{"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}}, "df": 6}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 4.123105625617661}}, "df": 1, "s": {"docs": {"sqlglot.dataframe": {"tf": 3}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}}, "df": 12}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.helper.first": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}}, "df": 2}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}}}}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, 
"g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 11}}}}}, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 1, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 2}}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 3.872983346207417}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 3}}, "df": 1}}}, "u": {"docs": {}, "df": 0, ":": {"8": {"0": {"9": {"0": {"docs": {}, "df": 0, "/": {"1": {"1": {"5": {"docs": {}, "df": 0, "/": {"1": {"docs": {}, "df": 0, "/": {"1": {"9": {"9": {"5": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2, "c": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 
1}}, "df": 3}}, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}, "c": {"docs": {"sqlglot": {"tf": 3.605551275463989}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.flatten": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Dot.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Paren.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 24, "a": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1, "n": {"docs": {"sqlglot": {"tf": 4}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 4.242640687119285}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 30, "o": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 2.449489742783178}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_list": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.helper.ensure_list": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}}, "df": 35, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": 
{"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 26}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 4}, "r": {"docs": {"sqlglot.diff.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 2}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.diff.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}}, "df": 3, "d": {"docs": {"sqlglot.generator.cached_generator": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, 
"sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}, "t": {"docs": {"sqlglot.trie.in_trie": {"tf": 2}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}}, "df": 11}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.func": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, 
"sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 39}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 3}}}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}}, "df": 1, "d": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}}, "df": 1}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}}, "df": 5, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "x": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 3.7416573867739413}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 27}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 3.4641016151377544}, 
"sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}}, "df": 14, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 22, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}}, "df": 5}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, 
"/": {"8": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, 
"sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2}, "sqlglot.planner.Scan.from_expression": {"tf": 2}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2}}, "df": 29}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.flatten": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}}, "df": 6}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 2}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 10, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 4}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Join.using": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 
0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 21}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 34, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 31}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 4}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.convert": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, 
"df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}}}}}, "j": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 2}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 
1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 73}}, "l": {"1": {"docs": {"sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}}, "df": 3}, "2": {"docs": {"sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}}, "df": 3}, "docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 10, "a": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.6457513110645907}}, "df": 3}, "b": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2}}, "df": 2}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 3.3166247903554}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Column.to_dot": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 2.449489742783178}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 
1.4142135623730951}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 2}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 2}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 43, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.all_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 27}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.execute": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_collection": {"tf": 1.4142135623730951}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}}, "df": 1}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}}, "df": 6, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, 
"c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, 
"sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.4142135623730951}}, "df": 44}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 8, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 7, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, 
"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 14}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3}, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 
1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.case_sensitive": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 44}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "n": {"docs": {"sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 9}}, "x": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Select.ctas": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, 
"df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 9, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.helper.object_to_dict": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}}}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 25}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}}, "df": 5}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 2}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 2}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 2}}, "df": 23, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 4}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 2}}, "df": 2, "s": {"docs": {"sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Select.cluster_by": {"tf": 2}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 2}}, "df": 1, "v": {"docs": {"sqlglot.helper.csv": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 2}}, "df": 2}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 2, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": 
{"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {"sqlglot": {"tf": 4.795831523312719}, "sqlglot.diff": {"tf": 3.7416573867739413}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.helper.dict_depth": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 3.1622776601683795}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}}, "df": 38, "e": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, 
"sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 4.242640687119285}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.449489742783178}, 
"sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 113, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 6}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 3}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 2}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": 
{"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 24}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 24}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, 
"sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 27}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}}, "df": 6}}, "y": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.449489742783178}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 3.872983346207417}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 16, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 
1.4142135623730951}}, "df": 2}}, "s": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 10}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "t": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 6}}, "y": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.pretty": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 
4.47213595499958}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}}, "df": 80, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "z": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}}, "df": 3}, "r": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 2}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.subquery": {"tf": 
1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}}, "df": 5, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 15, "d": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 6}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 13, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 4}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, 
"df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 3}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 9, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.not_": {"tf": 1.4142135623730951}}, "df": 1}}, "b": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 7}}}, "u": {"docs": {"sqlglot.executor": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}}, "df": 2, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, 
"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 55, "d": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.schema": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.returning": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, 
"sqlglot.trie.new_trie": {"tf": 1}}, "df": 87}, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 4}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 6}}}, "r": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 11}}}, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 
2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}}, "df": 23, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}}, "df": 3}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, 
"s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 3}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}}, "df": 1}, "/": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {"sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 4}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": 
{"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.7320508075688772}}, "df": 3}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 5, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 23, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": 
{"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 23}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dataframe": {"tf": 4.69041575982343}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 3.4641016151377544}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.diff": {"tf": 6.855654600401044}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, 
"sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 2}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 3}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 70, "m": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}}, "df": 10, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.pretty": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, 
"sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.7320508075688772}}, "df": 27, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.csv": {"tf": 1}}, "df": 24}}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 3}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, 
"i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 12}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 3}}}}, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "o": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 14}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "/": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot": {"tf": 6.557438524302}, "sqlglot.dataframe": {"tf": 4.58257569495584}, "sqlglot.dialects": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, 
"sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 4.358898943540674}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 
1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_placeholders": {"tf": 1.7320508075688772}, "sqlglot.expressions.expand": {"tf": 3.3166247903554}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.8284271247461903}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.Scan.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 124, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}}, "df": 8, "/": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 14}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}}, "df": 1}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": 
{"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 4, "c": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.func": {"tf": 2}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 5, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 2}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 35, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, 
"sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 34}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}}, "df": 3, "y": {"docs": {"sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"6": {"4": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1.7320508075688772}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "\u00e9": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.flatten": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, 
"sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 5}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.flatten": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 7}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 3.4641016151377544}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, 
"sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 64}}, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 9}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": 
{"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 24, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 23}, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.lineage": {"tf": 2}}, "df": 1}}}}, "k": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.text": 
{"tf": 1}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 2}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 2}, "sqlglot.helper.ensure_collection": {"tf": 1.4142135623730951}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 3}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 2}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 57, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 2}}, 
"c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 11}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.limit": {"tf": 2.23606797749979}}, "df": 4, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Union.limit": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 22}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.7320508075688772}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 5.196152422706632}}, "df": 1}}}, "f": {"docs": {"sqlglot.diff": {"tf": 6.324555320336759}, "sqlglot.expressions.Expression.text": {"tf": 1}}, "df": 2}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 4}}}, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}}, "df": 10}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 47}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {"sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 25}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "x": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "k": {"docs": {"sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {"sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 1}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, 
"sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 23, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.should_identify": {"tf": 1}}, "df": 23}}}}}}}, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 3}, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}, "p": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 3}}}, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 9, "/": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "g": {"docs": {}, 
"df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "a": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 4}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3, "r": {"docs": {"sqlglot.expressions.condition": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "z": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"sqlglot.optimizer.simplify.simplify_not": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1.4142135623730951}}, "df": 8, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1.4142135623730951}}, "df": 1}}, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, 
"df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 4, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}}, "df": 4}, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}}, "df": 4, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 8}, "d": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 6}, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 3}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}}, "df": 5, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 5}}}}}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.parts": {"tf": 1}, "sqlglot.expressions.Table.parts": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 12, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.same_parent": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, 
"sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.cached_generator": {"tf": 
1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.udtfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, 
"sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 180}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 19}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Delete.returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}}, "df": 3}}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 13}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3}}, "s": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 26}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 4}}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 10, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}}, "df": 2}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 3}, "s": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}}, "df": 13, "d": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 4}, "s": {"docs": {"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 45}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 10, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, 
"sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 6}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}}, "df": 11}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.flatten": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "t": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.root": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 5}}, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, 
"sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 46, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 5}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 42}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2}}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "w": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 3}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}}, "df": 6, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}}, "df": 3}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "\u00e9": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 4}}}}}, "w": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 4.58257569495584}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Dot.build": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 48, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": 
{"tf": 1.4142135623730951}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 5}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.hint": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 62}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": 
{"sqlglot.diff": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 3}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 5}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 4}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 31}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, 
"sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.transforms.remove_target_from_merge": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 53}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 3}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, 
"sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, "sqlglot.schema.nested_get": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 41}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.is_star": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Union.is_star": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.is_star": {"tf": 1}, "sqlglot.expressions.Subquery.is_star": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, 
"sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}, "sqlglot.helper.should_identify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.449489742783178}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.empty": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 60}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot.executor": {"tf": 2}}, "df": 1}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 5}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2}}, "df": 4}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {"sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}}, "df": 3, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}}, "df": 12}}}, "s": {"docs": {"sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 23}, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 4, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 6, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 5.5677643628300215}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 14, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 3}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, 
"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}}, "df": 3}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "\u2019": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 5, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "k": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.split_num_words": {"tf": 2.8284271247461903}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.var": {"tf": 2.449489742783178}, "sqlglot.tokens.Token.var": {"tf": 1.4142135623730951}}, "df": 4, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.var": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 2.23606797749979}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.set": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 2.23606797749979}, "sqlglot.helper.ensure_collection": {"tf": 2.23606797749979}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.is_iterable": {"tf": 2}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_set": {"tf": 3.1622776601683795}, "sqlglot.trie.in_trie": {"tf": 1.7320508075688772}}, "df": 17, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, 
"sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.insert": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 2.6457513110645907}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 32}}}}}, "e": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 4}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 2.449489742783178}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.executor.context.Context": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}}, "df": 3}}}, "\u00e4": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 2.23606797749979}, "sqlglot.schema.AbstractMappingSchema": {"tf": 2.23606797749979}}, "df": 2}}, "g": 
{"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 7, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.7320508075688772}}, "df": 5, "[": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.cached_generator": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 40}}, "e": {"docs": {"sqlglot.dataframe": 
{"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3, "d": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 25}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.nested_get": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 16, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, 
"df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.name_sequence": {"tf": 1}, "sqlglot.helper.should_identify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver.get_source_columns": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.nested_set": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 47}}}, "t": {"docs": {"sqlglot.diff": 
{"tf": 2}}, "df": 1, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 5.656854249492381}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.replace": {"tf": 3}, "sqlglot.expressions.Expression.assert_is": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Predicate": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.intersect": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.except_": {"tf": 2.449489742783178}, "sqlglot.expressions.Column.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Delete.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.returning": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Insert.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Join.on": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.using": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 3}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.distinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lock": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.hint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Star.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Dot.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Alias.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Cast.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.maybe_parse": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 
1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 3.4641016151377544}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.paren": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 2.449489742783178}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.expressions.var": {"tf": 2.449489742783178}, "sqlglot.expressions.column_table_names": {"tf": 2.449489742783178}, "sqlglot.expressions.table_name": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_tables": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_placeholders": {"tf": 2.449489742783178}, "sqlglot.expressions.expand": {"tf": 3}, "sqlglot.expressions.func": {"tf": 2.449489742783178}, "sqlglot.helper.split_num_words": {"tf": 3}, "sqlglot.helper.is_iterable": {"tf": 2.449489742783178}, "sqlglot.helper.flatten": {"tf": 2.449489742783178}, "sqlglot.helper.dict_depth": {"tf": 3.872983346207417}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 4}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 3.4641016151377544}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 3.4641016151377544}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3.872983346207417}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 3}, "sqlglot.optimizer.normalize.normalize": {"tf": 3}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 3}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 3}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2.449489742783178}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 3.4641016151377544}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 3.4641016151377544}, "sqlglot.optimizer.qualify.qualify": {"tf": 3.4641016151377544}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 3.4641016151377544}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 3.872983346207417}, "sqlglot.optimizer.simplify.simplify": {"tf": 3}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 3}, "sqlglot.schema.nested_set": {"tf": 2.449489742783178}, "sqlglot.time.format_time": {"tf": 1.7320508075688772}, "sqlglot.transforms.unalias_group": {"tf": 2.449489742783178}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 3}}, "df": 104}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, 
"sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 2}}, "df": 13, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 3}}, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "z": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 7}, "d": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, 
"sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 3.7416573867739413}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 29, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 14.594519519326424}, "sqlglot.dataframe": {"tf": 10.392304845413264}, "sqlglot.dialects": {"tf": 5.385164807134504}, "sqlglot.diff": {"tf": 5.830951894845301}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.assert_is": {"tf": 2}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Condition.or_": {"tf": 2}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.Delete.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Delete.returning": {"tf": 2.449489742783178}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Insert.with_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Literal.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.on": {"tf": 2}, "sqlglot.expressions.Join.using": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Union.select": {"tf": 2}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.order_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.sort_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.8284271247461903}, 
"sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2}, "sqlglot.expressions.Select.lateral": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.join": {"tf": 5.477225575051661}, "sqlglot.expressions.Select.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.having": {"tf": 3.1622776601683795}, "sqlglot.expressions.Select.distinct": {"tf": 2}, "sqlglot.expressions.Select.ctas": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lock": {"tf": 4.47213595499958}, "sqlglot.expressions.Select.hint": {"tf": 2.8284271247461903}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Star.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Dot.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Paren.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Alias.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Cast.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 2.449489742783178}, "sqlglot.expressions.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.update": {"tf": 4}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.insert": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 3.1622776601683795}, "sqlglot.expressions.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.paren": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_placeholders": {"tf": 3.1622776601683795}, "sqlglot.expressions.expand": {"tf": 4}, "sqlglot.expressions.func": {"tf": 2.449489742783178}, "sqlglot.helper.split_num_words": {"tf": 3.4641016151377544}, "sqlglot.helper.is_iterable": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.helper.dict_depth": {"tf": 3.1622776601683795}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify.qualify": {"tf": 4.242640687119285}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.8284271247461903}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_set": {"tf": 4.242640687119285}, "sqlglot.time.format_time": {"tf": 2.449489742783178}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2.449489742783178}, "sqlglot.trie.in_trie": {"tf": 3.4641016151377544}}, "df": 97, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.7320508075688772}}, "df": 26, "d": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.maybe_parse": 
{"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1.4142135623730951}}, "df": 29}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}}, "df": 4}}}, "k": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_qualify": {"tf": 1.7320508075688772}}, "df": 6}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify.qualify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}}, "df": 5}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "y": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Predicate": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 3.1622776601683795}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 
2.8284271247461903}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.23606797749979}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.8284271247461903}, "sqlglot.planner.Step.from_expression": {"tf": 2.8284271247461903}, "sqlglot.planner.Scan.from_expression": {"tf": 2.8284271247461903}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.8284271247461903}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}}, "df": 41, "o": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 2}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 8, "r": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}}}, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "y": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.time.format_time": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 9}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, 
"sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}, "sqlglot.transforms.explode_to_unnest": {"tf": 1}}, "df": 11}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 3.7416573867739413}}, "df": 1, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Select.hint": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.expressions.Select.hint": {"tf": 2}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}}, "df": 5, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "q": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.optimizer.qualify_columns.Resolver": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.condition": 
{"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 4}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 4.69041575982343}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 10, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 2}, "sqlglot.expressions.Select.having": {"tf": 1.7320508075688772}}, "df": 2}}}}, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Dot.output_name": {"tf": 1}, "sqlglot.expressions.Paren.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 21, "n": {"docs": {"sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}}, "df": 3, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, 
"l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 2}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "f": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, ":": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}}}}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.eliminate_qualify": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 1}}}}}}}, ":": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 4}}}}, "m": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 9, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}}, "df": 1}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 
1.4142135623730951}, "sqlglot.diff": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.optimizer.normalize_identifiers.normalize_identifiers": {"tf": 1}, "sqlglot.optimizer.qualify_columns.quote_identifiers": {"tf": 1}}, "df": 5}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.schema.nested_set": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}, "p": {"docs": {"sqlglot.expressions.Func": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}}, "df": 18, "[": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}}, "df": 6}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": 
{}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 24}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}}, "df": 7, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}}, "df": 3}, "d": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 7.14142842854285}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 3}, "sqlglot.diff.diff": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "h": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "x": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 45, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": 
{"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 42}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 9}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, 
"sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}}, "df": 27}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Delete.delete": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Delete.returning": {"tf": 1}, "sqlglot.expressions.Insert.with_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Select.hint": {"tf": 1}}, "df": 24}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 4}}}, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 24, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.expressions.to_interval": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "z": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 4, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 2, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 6}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 20, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Parser": {"tf": 1}, "sqlglot.dialects.spark2.Spark2.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Parser": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, 
"sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 46}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}}, "df": 1, "s": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "x": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.qualify.qualify": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 7}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 
1}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "e": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Delete.where": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 11}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}}, "df": 4, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 9}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}}, "df": 1}}}}}, "j": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.on": {"tf": 2}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 3.872983346207417}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 2.23606797749979}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, 
"sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.unnest_to_explode": {"tf": 1}}, "df": 18, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}}, "df": 8}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Select.join": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 2.23606797749979}}, "df": 5}}, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}}, "x": {"docs": {"sqlglot": {"tf": 4.898979485566356}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 4.358898943540674}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Predicate": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete.where": {"tf": 2}, "sqlglot.expressions.Insert.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.hint": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 2}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.var": {"tf": 2}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2.23606797749979}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2.23606797749979}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.8284271247461903}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.449489742783178}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.449489742783178}, "sqlglot.planner.Step.from_expression": {"tf": 4}, "sqlglot.planner.Scan.from_expression": {"tf": 4}, "sqlglot.planner.SetOperation.from_expression": {"tf": 4}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}}, "df": 63, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "z": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, 
"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}}, "df": 15, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 4}}, "y": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.set": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.iter_expressions": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.nested_get": {"tf": 2}, "sqlglot.schema.nested_set": {"tf": 3.4641016151377544}, "sqlglot.trie.in_trie": {"tf": 2}}, "df": 16, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2}}, "df": 3}}}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.schema.nested_set": {"tf": 1.4142135623730951}}, "df": 4}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "o": 
{"docs": {}, "df": 0, "w": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.diff.diff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 2}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Kwarg": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 6}}}}}}}}}, "pipeline": ["trimmer"], "_isPrebuiltIndex": true}; // mirrored in build-search-index.js (part 1) // Also split on html tags. this is a cheap heuristic, but good enough. diff --git a/docs/sqlglot.html b/docs/sqlglot.html index 0edd2e4..f9e5c98 100644 --- a/docs/sqlglot.html +++ b/docs/sqlglot.html @@ -101,6 +101,8 @@

Syntax errors are highlighted and dialect incompatibilities can warn or raise depending on configurations. However, it should be noted that SQL validation is not SQLGlot’s goal, so some syntax errors may go unnoticed.
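For illustration only (not part of the upstream page), a minimal sketch of how these two behaviours surface through the public API, assuming the top-level transpile() helper, the error classes exported by sqlglot.errors, and that generator options such as unsupported_level are forwarded through transpile(**opts):

import sqlglot
from sqlglot.errors import ErrorLevel, ParseError, UnsupportedError

try:
    # malformed SQL: the parser raises ParseError by default
    sqlglot.transpile("SELECT foo( FROM bar")
except ParseError as e:
    print(e)

try:
    # dialect incompatibilities are reported by the generator; unsupported_level
    # chooses between warning (the default) and raising
    sqlglot.transpile(
        "SELECT 1 EXCEPT ALL SELECT 2",
        write="bigquery",
        unsupported_level=ErrorLevel.RAISE,
    )
except UnsupportedError as e:
    print(e)  # EXCEPT without DISTINCT is not supported in BigQuery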

+

Learn more about the SQLGlot API in the documentation.

+

Contributions are very welcome in SQLGlot; read the contribution guide to get started!

Table of Contents

@@ -358,7 +360,7 @@
-
'SELECT x FROM y, z'
+
'SELECT x FROM z'
 
@@ -581,7 +583,9 @@

Documentation

-

SQLGlot uses pdoc to serve its API documentation:

+

SQLGlot uses pdoc to serve its API documentation.

+
+

A hosted version is on the SQLGlot website, or you can build locally with:

make docs-serve
 
diff --git a/docs/sqlglot/_version.html b/docs/sqlglot/_version.html
index 9942e5f..6988e7d 100644
--- a/docs/sqlglot/_version.html
+++ b/docs/sqlglot/_version.html
@@ -51,8 +51,8 @@
1# file generated by setuptools_scm
 2# don't change, don't track in version control
-3__version__ = version = '15.1.0'
-4__version_tuple__ = version_tuple = (15, 1, 0)
+3__version__ = version = '16.2.0'
+4__version_tuple__ = version_tuple = (16, 2, 0)
 
diff --git a/docs/sqlglot/dataframe/sql.html b/docs/sqlglot/dataframe/sql.html
index 3cb2b9a..5e41262 100644
--- a/docs/sqlglot/dataframe/sql.html
+++ b/docs/sqlglot/dataframe/sql.html
@@ -602,7 +602,7 @@
def - createDataFrame( self, data: Sequence[Union[Dict[str, <MagicMock id='140604724939168'>], List[<MagicMock id='140604724939168'>], Tuple]], schema: Optional[<MagicMock id='140604726296240'>] = None, samplingRatio: Optional[float] = None, verifySchema: bool = False) -> sqlglot.dataframe.sql.DataFrame: + createDataFrame( self, data: Sequence[Union[Dict[str, <MagicMock id='140043311982688'>], List[<MagicMock id='140043311982688'>], Tuple]], schema: Optional[<MagicMock id='140043311598608'>] = None, samplingRatio: Optional[float] = None, verifySchema: bool = False) -> sqlglot.dataframe.sql.DataFrame: @@ -1505,7 +1505,7 @@
- DataFrame( spark: <MagicMock id='140604728440864'>, expression: sqlglot.expressions.Select, branch_id: Optional[str] = None, sequence_id: Optional[str] = None, last_op: sqlglot.dataframe.sql.operations.Operation = <Operation.INIT: -1>, pending_hints: Optional[List[sqlglot.expressions.Expression]] = None, output_expression_container: Optional[<MagicMock id='140604729565168'>] = None, **kwargs) + DataFrame( spark: <MagicMock id='140043314786224'>, expression: sqlglot.expressions.Select, branch_id: Optional[str] = None, sequence_id: Optional[str] = None, last_op: sqlglot.dataframe.sql.operations.Operation = <Operation.INIT: -1>, pending_hints: Optional[List[sqlglot.expressions.Expression]] = None, output_expression_container: Optional[<MagicMock id='140043314972080'>] = None, **kwargs) @@ -2280,7 +2280,7 @@ is unlikely to come up.

@operation(Operation.FROM)
def - fillna( self, value: <MagicMock id='140604723599328'>, subset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: + fillna( self, value: <MagicMock id='140043310407712'>, subset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: @@ -2349,7 +2349,7 @@ and check if it matches the type of the value provided. If not then make it null
@operation(Operation.FROM)
def - replace( self, to_replace: Union[bool, int, float, str, List, Dict], value: Union[bool, int, float, str, List, NoneType] = None, subset: Union[Collection[<MagicMock id='140604723535904'>], <MagicMock id='140604723535904'>, NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: + replace( self, to_replace: Union[bool, int, float, str, List, Dict], value: Union[bool, int, float, str, List, NoneType] = None, subset: Union[Collection[<MagicMock id='140043310703920'>], <MagicMock id='140043310703920'>, NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: @@ -2554,7 +2554,7 @@ and check if it matches the type of the value provided. If not then make it null
@operation(Operation.NO_OP)
def - repartition( self, numPartitions: Union[int, <MagicMock id='140604723822992'>], *cols: <MagicMock id='140604723880880'>) -> sqlglot.dataframe.sql.DataFrame: + repartition( self, numPartitions: Union[int, <MagicMock id='140043310895632'>], *cols: <MagicMock id='140043310935312'>) -> sqlglot.dataframe.sql.DataFrame: @@ -3222,7 +3222,7 @@ and check if it matches the type of the value provided. If not then make it null
- Column( expression: Union[<MagicMock id='140604725847056'>, sqlglot.expressions.Expression, NoneType]) + Column( expression: Union[<MagicMock id='140043312805776'>, sqlglot.expressions.Expression, NoneType]) @@ -3250,7 +3250,7 @@ and check if it matches the type of the value provided. If not then make it null
@classmethod
def - ensure_col( cls, value: Union[<MagicMock id='140604723943728'>, sqlglot.expressions.Expression, NoneType]): + ensure_col( cls, value: Union[<MagicMock id='140043311007728'>, sqlglot.expressions.Expression, NoneType]): @@ -3271,7 +3271,7 @@ and check if it matches the type of the value provided. If not then make it null
@classmethod
def - ensure_cols( cls, args: List[Union[<MagicMock id='140604723933680'>, sqlglot.expressions.Expression]]) -> List[sqlglot.dataframe.sql.Column]: + ensure_cols( cls, args: List[Union[<MagicMock id='140043311137744'>, sqlglot.expressions.Expression]]) -> List[sqlglot.dataframe.sql.Column]: @@ -3292,7 +3292,7 @@ and check if it matches the type of the value provided. If not then make it null
@classmethod
def - invoke_anonymous_function( cls, column: Optional[<MagicMock id='140604724460464'>], func_name: str, *args: Optional[<MagicMock id='140604724156480'>]) -> sqlglot.dataframe.sql.Column: + invoke_anonymous_function( cls, column: Optional[<MagicMock id='140043310280000'>], func_name: str, *args: Optional[<MagicMock id='140043311061728'>]) -> sqlglot.dataframe.sql.Column: @@ -3319,7 +3319,7 @@ and check if it matches the type of the value provided. If not then make it null
@classmethod
def - invoke_expression_over_column( cls, column: Optional[<MagicMock id='140604724090944'>], callable_expression: Callable, **kwargs) -> sqlglot.dataframe.sql.Column: + invoke_expression_over_column( cls, column: Optional[<MagicMock id='140043309080992'>], callable_expression: Callable, **kwargs) -> sqlglot.dataframe.sql.Column: @@ -3356,7 +3356,7 @@ and check if it matches the type of the value provided. If not then make it null
def - binary_op( self, klass: Callable, other: <MagicMock id='140604724048944'>, **kwargs) -> sqlglot.dataframe.sql.Column: + binary_op( self, klass: Callable, other: <MagicMock id='140043309221248'>, **kwargs) -> sqlglot.dataframe.sql.Column: @@ -3377,7 +3377,7 @@ and check if it matches the type of the value provided. If not then make it null
def - inverse_binary_op( self, klass: Callable, other: <MagicMock id='140604724339344'>, **kwargs) -> sqlglot.dataframe.sql.Column: + inverse_binary_op( self, klass: Callable, other: <MagicMock id='140043309230512'>, **kwargs) -> sqlglot.dataframe.sql.Column: @@ -3887,7 +3887,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

def - isin( self, *cols: Union[<MagicMock id='140604722499344'>, Iterable[<MagicMock id='140604722499344'>]]): + isin( self, *cols: Union[<MagicMock id='140043309453744'>, Iterable[<MagicMock id='140043309453744'>]]): @@ -3908,7 +3908,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

def - between( self, lowerBound: <MagicMock id='140604722556992'>, upperBound: <MagicMock id='140604722616528'>) -> sqlglot.dataframe.sql.Column: + between( self, lowerBound: <MagicMock id='140043309563968'>, upperBound: <MagicMock id='140043309589040'>) -> sqlglot.dataframe.sql.Column: @@ -3943,7 +3943,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

def - over( self, window: <MagicMock id='140604722692160'>) -> sqlglot.dataframe.sql.Column: + over( self, window: <MagicMock id='140043309646896'>) -> sqlglot.dataframe.sql.Column: @@ -4141,7 +4141,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

@classmethod
def - partitionBy( cls, *cols: Union[<MagicMock id='140604722964432'>, List[<MagicMock id='140604722964432'>]]) -> sqlglot.dataframe.sql.WindowSpec: + partitionBy( cls, *cols: Union[<MagicMock id='140043310053360'>, List[<MagicMock id='140043310053360'>]]) -> sqlglot.dataframe.sql.WindowSpec: @@ -4162,7 +4162,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

@classmethod
def - orderBy( cls, *cols: Union[<MagicMock id='140604723170512'>, List[<MagicMock id='140604723170512'>]]) -> sqlglot.dataframe.sql.WindowSpec: + orderBy( cls, *cols: Union[<MagicMock id='140043309956208'>, List[<MagicMock id='140043309956208'>]]) -> sqlglot.dataframe.sql.WindowSpec: @@ -4387,7 +4387,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

def - partitionBy( self, *cols: Union[<MagicMock id='140604723060480'>, List[<MagicMock id='140604723060480'>]]) -> sqlglot.dataframe.sql.WindowSpec: + partitionBy( self, *cols: Union[<MagicMock id='140043309879920'>, List[<MagicMock id='140043309879920'>]]) -> sqlglot.dataframe.sql.WindowSpec: @@ -4414,7 +4414,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

def - orderBy( self, *cols: Union[<MagicMock id='140604722718768'>, List[<MagicMock id='140604722718768'>]]) -> sqlglot.dataframe.sql.WindowSpec: + orderBy( self, *cols: Union[<MagicMock id='140043309740432'>, List[<MagicMock id='140043309740432'>]]) -> sqlglot.dataframe.sql.WindowSpec: diff --git a/docs/sqlglot/dialects/bigquery.html b/docs/sqlglot/dialects/bigquery.html index 246130a..7edabe1 100644 --- a/docs/sqlglot/dialects/bigquery.html +++ b/docs/sqlglot/dialects/bigquery.html @@ -110,359 +110,388 @@ 7from sqlglot.dialects.dialect import ( 8 Dialect, 9 datestrtodate_sql, - 10 inline_array_sql, - 11 max_or_greatest, - 12 min_or_least, - 13 no_ilike_sql, - 14 parse_date_delta_with_interval, - 15 rename_func, - 16 timestrtotime_sql, - 17 ts_or_ds_to_date_sql, - 18) - 19from sqlglot.helper import seq_get, split_num_words - 20from sqlglot.tokens import TokenType - 21 + 10 format_time_lambda, + 11 inline_array_sql, + 12 max_or_greatest, + 13 min_or_least, + 14 no_ilike_sql, + 15 parse_date_delta_with_interval, + 16 rename_func, + 17 timestrtotime_sql, + 18 ts_or_ds_to_date_sql, + 19) + 20from sqlglot.helper import seq_get, split_num_words + 21from sqlglot.tokens import TokenType 22 - 23def _date_add_sql( - 24 data_type: str, kind: str - 25) -> t.Callable[[generator.Generator, exp.Expression], str]: - 26 def func(self, expression): - 27 this = self.sql(expression, "this") - 28 unit = expression.args.get("unit") - 29 unit = exp.var(unit.name.upper() if unit else "DAY") - 30 interval = exp.Interval(this=expression.expression, unit=unit) - 31 return f"{data_type}_{kind}({this}, {self.sql(interval)})" - 32 - 33 return func - 34 + 23 + 24def _date_add_sql( + 25 data_type: str, kind: str + 26) -> t.Callable[[generator.Generator, exp.Expression], str]: + 27 def func(self, expression): + 28 this = self.sql(expression, "this") + 29 unit = expression.args.get("unit") + 30 unit = exp.var(unit.name.upper() if unit else "DAY") + 31 interval = exp.Interval(this=expression.expression, unit=unit) + 32 return f"{data_type}_{kind}({this}, {self.sql(interval)})" + 33 + 34 return func 35 - 36def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str: - 37 if not isinstance(expression.unnest().parent, exp.From): - 38 return self.values_sql(expression) - 39 - 40 alias = expression.args.get("alias") - 41 - 42 structs = [ - 43 exp.Struct( - 44 expressions=[ - 45 exp.alias_(value, column_name) - 46 for value, column_name in zip( - 47 t.expressions, - 48 alias.columns - 49 if alias and alias.columns - 50 else (f"_c{i}" for i in range(len(t.expressions))), - 51 ) - 52 ] - 53 ) - 54 for t in expression.find_all(exp.Tuple) - 55 ] - 56 - 57 return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)])) - 58 + 36 + 37def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str: + 38 if not isinstance(expression.unnest().parent, exp.From): + 39 return self.values_sql(expression) + 40 + 41 alias = expression.args.get("alias") + 42 + 43 structs = [ + 44 exp.Struct( + 45 expressions=[ + 46 exp.alias_(value, column_name) + 47 for value, column_name in zip( + 48 t.expressions, + 49 alias.columns + 50 if alias and alias.columns + 51 else (f"_c{i}" for i in range(len(t.expressions))), + 52 ) + 53 ] + 54 ) + 55 for t in expression.find_all(exp.Tuple) + 56 ] + 57 + 58 return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)])) 59 - 60def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str: 
- 61 this = expression.this - 62 if isinstance(this, exp.Schema): - 63 this = f"{this.this} <{self.expressions(this)}>" - 64 else: - 65 this = self.sql(this) - 66 return f"RETURNS {this}" - 67 + 60 + 61def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str: + 62 this = expression.this + 63 if isinstance(this, exp.Schema): + 64 this = f"{this.this} <{self.expressions(this)}>" + 65 else: + 66 this = self.sql(this) + 67 return f"RETURNS {this}" 68 - 69def _create_sql(self: generator.Generator, expression: exp.Create) -> str: - 70 kind = expression.args["kind"] - 71 returns = expression.find(exp.ReturnsProperty) - 72 if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"): - 73 expression = expression.copy() - 74 expression.set("kind", "TABLE FUNCTION") - 75 if isinstance( - 76 expression.expression, - 77 ( - 78 exp.Subquery, - 79 exp.Literal, - 80 ), - 81 ): - 82 expression.set("expression", expression.expression.this) - 83 - 84 return self.create_sql(expression) - 85 - 86 return self.create_sql(expression) - 87 + 69 + 70def _create_sql(self: generator.Generator, expression: exp.Create) -> str: + 71 kind = expression.args["kind"] + 72 returns = expression.find(exp.ReturnsProperty) + 73 if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"): + 74 expression = expression.copy() + 75 expression.set("kind", "TABLE FUNCTION") + 76 if isinstance( + 77 expression.expression, + 78 ( + 79 exp.Subquery, + 80 exp.Literal, + 81 ), + 82 ): + 83 expression.set("expression", expression.expression.this) + 84 + 85 return self.create_sql(expression) + 86 + 87 return self.create_sql(expression) 88 - 89def _unqualify_unnest(expression: exp.Expression) -> exp.Expression: - 90 """Remove references to unnest table aliases since bigquery doesn't allow them. - 91 - 92 These are added by the optimizer's qualify_column step. - 93 """ - 94 if isinstance(expression, exp.Select): - 95 for unnest in expression.find_all(exp.Unnest): - 96 if isinstance(unnest.parent, (exp.From, exp.Join)) and unnest.alias: - 97 for select in expression.selects: - 98 for column in select.find_all(exp.Column): - 99 if column.table == unnest.alias: -100 column.set("table", None) -101 -102 return expression -103 + 89 + 90def _unqualify_unnest(expression: exp.Expression) -> exp.Expression: + 91 """Remove references to unnest table aliases since bigquery doesn't allow them. + 92 + 93 These are added by the optimizer's qualify_column step. 
+ 94 """ + 95 if isinstance(expression, exp.Select): + 96 for unnest in expression.find_all(exp.Unnest): + 97 if isinstance(unnest.parent, (exp.From, exp.Join)) and unnest.alias: + 98 for select in expression.selects: + 99 for column in select.find_all(exp.Column): +100 if column.table == unnest.alias: +101 column.set("table", None) +102 +103 return expression 104 -105class BigQuery(Dialect): -106 unnest_column_only = True -107 time_mapping = { -108 "%M": "%-M", -109 "%d": "%-d", -110 "%m": "%-m", -111 "%y": "%-y", -112 "%H": "%-H", -113 "%I": "%-I", -114 "%S": "%-S", -115 "%j": "%-j", -116 } -117 -118 class Tokenizer(tokens.Tokenizer): -119 QUOTES = ["'", '"', '"""', "'''"] -120 COMMENTS = ["--", "#", ("/*", "*/")] -121 IDENTIFIERS = ["`"] -122 STRING_ESCAPES = ["\\"] -123 -124 HEX_STRINGS = [("0x", ""), ("0X", "")] -125 -126 BYTE_STRINGS = [ -127 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B") -128 ] -129 -130 RAW_STRINGS = [ -131 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R") -132 ] -133 -134 KEYWORDS = { -135 **tokens.Tokenizer.KEYWORDS, -136 "ANY TYPE": TokenType.VARIANT, -137 "BEGIN": TokenType.COMMAND, -138 "BEGIN TRANSACTION": TokenType.BEGIN, -139 "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, -140 "BYTES": TokenType.BINARY, -141 "DECLARE": TokenType.COMMAND, -142 "FLOAT64": TokenType.DOUBLE, -143 "INT64": TokenType.BIGINT, -144 "RECORD": TokenType.STRUCT, -145 "NOT DETERMINISTIC": TokenType.VOLATILE, -146 "UNKNOWN": TokenType.NULL, -147 } -148 KEYWORDS.pop("DIV") -149 -150 class Parser(parser.Parser): -151 PREFIXED_PIVOT_COLUMNS = True -152 -153 LOG_BASE_FIRST = False -154 LOG_DEFAULTS_TO_LN = True -155 -156 FUNCTIONS = { -157 **parser.Parser.FUNCTIONS, -158 "DATE_TRUNC": lambda args: exp.DateTrunc( -159 unit=exp.Literal.string(str(seq_get(args, 1))), -160 this=seq_get(args, 0), -161 ), -162 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), -163 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), -164 "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)), -165 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, -166 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( -167 this=seq_get(args, 0), -168 expression=seq_get(args, 1), -169 position=seq_get(args, 2), -170 occurrence=seq_get(args, 3), -171 group=exp.Literal.number(1) -172 if re.compile(str(seq_get(args, 1))).groups == 1 -173 else None, -174 ), -175 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), -176 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), -177 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), -178 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), -179 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), -180 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), -181 "PARSE_TIMESTAMP": lambda args: exp.StrToTime( -182 this=seq_get(args, 1), format=seq_get(args, 0) -183 ), -184 } -185 -186 FUNCTION_PARSERS = { -187 **parser.Parser.FUNCTION_PARSERS, -188 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), -189 } -190 FUNCTION_PARSERS.pop("TRIM") -191 -192 NO_PAREN_FUNCTIONS = { -193 **parser.Parser.NO_PAREN_FUNCTIONS, -194 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, -195 } -196 -197 NESTED_TYPE_TOKENS = { -198 *parser.Parser.NESTED_TYPE_TOKENS, -199 TokenType.TABLE, -200 } -201 -202 ID_VAR_TOKENS = { -203 *parser.Parser.ID_VAR_TOKENS, -204 TokenType.VALUES, -205 } -206 -207 PROPERTY_PARSERS = { -208 
**parser.Parser.PROPERTY_PARSERS, -209 "NOT DETERMINISTIC": lambda self: self.expression( -210 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") -211 ), -212 "OPTIONS": lambda self: self._parse_with_property(), -213 } -214 -215 CONSTRAINT_PARSERS = { -216 **parser.Parser.CONSTRAINT_PARSERS, -217 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), -218 } -219 -220 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: -221 this = super()._parse_table_part(schema=schema) -222 -223 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names -224 if isinstance(this, exp.Identifier): -225 table_name = this.name -226 while self._match(TokenType.DASH, advance=False) and self._next: -227 self._advance(2) -228 table_name += f"-{self._prev.text}" -229 -230 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) -231 -232 return this -233 -234 def _parse_table_parts(self, schema: bool = False) -> exp.Table: -235 table = super()._parse_table_parts(schema=schema) -236 if isinstance(table.this, exp.Identifier) and "." in table.name: -237 catalog, db, this, *rest = ( -238 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) -239 for x in split_num_words(table.name, ".", 3) -240 ) -241 -242 if rest and this: -243 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) -244 -245 table = exp.Table(this=this, db=db, catalog=catalog) -246 -247 return table -248 -249 class Generator(generator.Generator): -250 EXPLICIT_UNION = True -251 INTERVAL_ALLOWS_PLURAL_FORM = False -252 JOIN_HINTS = False -253 TABLE_HINTS = False -254 LIMIT_FETCH = "LIMIT" -255 RENAME_TABLE_WITH_DB = False -256 -257 TRANSFORMS = { -258 **generator.Generator.TRANSFORMS, -259 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), -260 exp.ArraySize: rename_func("ARRAY_LENGTH"), -261 exp.AtTimeZone: lambda self, e: self.func( -262 "TIMESTAMP", self.func("DATETIME", e.this, e.args.get("zone")) -263 ), -264 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), -265 exp.DateAdd: _date_add_sql("DATE", "ADD"), -266 exp.DateSub: _date_add_sql("DATE", "SUB"), -267 exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"), -268 exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"), -269 exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", -270 exp.DateStrToDate: datestrtodate_sql, -271 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), -272 exp.GroupConcat: rename_func("STRING_AGG"), -273 exp.ILike: no_ilike_sql, -274 exp.IntDiv: rename_func("DIV"), -275 exp.Max: max_or_greatest, -276 exp.Min: min_or_least, -277 exp.Select: transforms.preprocess( -278 [_unqualify_unnest, transforms.eliminate_distinct_on] -279 ), -280 exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})", -281 exp.TimeAdd: _date_add_sql("TIME", "ADD"), -282 exp.TimeSub: _date_add_sql("TIME", "SUB"), -283 exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"), -284 exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"), -285 exp.TimeStrToTime: timestrtotime_sql, -286 exp.TryCast: lambda self, e: f"SAFE_CAST({self.sql(e, 'this')} AS {self.sql(e, 'to')})", -287 exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"), -288 exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"), -289 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", -290 exp.VariancePop: rename_func("VAR_POP"), -291 
exp.Values: _derived_table_values_to_unnest, -292 exp.ReturnsProperty: _returnsproperty_sql, -293 exp.Create: _create_sql, -294 exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression), -295 exp.StabilityProperty: lambda self, e: f"DETERMINISTIC" -296 if e.name == "IMMUTABLE" -297 else "NOT DETERMINISTIC", -298 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), -299 } -300 -301 TYPE_MAPPING = { -302 **generator.Generator.TYPE_MAPPING, -303 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", -304 exp.DataType.Type.BIGINT: "INT64", -305 exp.DataType.Type.BINARY: "BYTES", -306 exp.DataType.Type.BOOLEAN: "BOOL", -307 exp.DataType.Type.CHAR: "STRING", -308 exp.DataType.Type.DECIMAL: "NUMERIC", -309 exp.DataType.Type.DOUBLE: "FLOAT64", -310 exp.DataType.Type.FLOAT: "FLOAT64", -311 exp.DataType.Type.INT: "INT64", -312 exp.DataType.Type.NCHAR: "STRING", -313 exp.DataType.Type.NVARCHAR: "STRING", -314 exp.DataType.Type.SMALLINT: "INT64", -315 exp.DataType.Type.TEXT: "STRING", -316 exp.DataType.Type.TIMESTAMP: "DATETIME", -317 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -318 exp.DataType.Type.TINYINT: "INT64", -319 exp.DataType.Type.VARBINARY: "BYTES", -320 exp.DataType.Type.VARCHAR: "STRING", -321 exp.DataType.Type.VARIANT: "ANY TYPE", -322 } -323 -324 PROPERTIES_LOCATION = { -325 **generator.Generator.PROPERTIES_LOCATION, -326 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, -327 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -328 } -329 -330 RESERVED_KEYWORDS = {*generator.Generator.RESERVED_KEYWORDS, "hash"} -331 -332 def array_sql(self, expression: exp.Array) -> str: -333 first_arg = seq_get(expression.expressions, 0) -334 if isinstance(first_arg, exp.Subqueryable): -335 return f"ARRAY{self.wrap(self.sql(first_arg))}" -336 -337 return inline_array_sql(self, expression) -338 -339 def transaction_sql(self, *_) -> str: -340 return "BEGIN TRANSACTION" -341 -342 def commit_sql(self, *_) -> str: -343 return "COMMIT TRANSACTION" -344 -345 def rollback_sql(self, *_) -> str: -346 return "ROLLBACK TRANSACTION" -347 -348 def in_unnest_op(self, expression: exp.Unnest) -> str: -349 return self.sql(expression) -350 -351 def except_op(self, expression: exp.Except) -> str: -352 if not expression.args.get("distinct", False): -353 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") -354 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" -355 -356 def intersect_op(self, expression: exp.Intersect) -> str: -357 if not expression.args.get("distinct", False): -358 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") -359 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" +105 +106class BigQuery(Dialect): +107 UNNEST_COLUMN_ONLY = True +108 +109 TIME_MAPPING = { +110 "%D": "%m/%d/%y", +111 } +112 +113 FORMAT_MAPPING = { +114 "DD": "%d", +115 "MM": "%m", +116 "MON": "%b", +117 "MONTH": "%B", +118 "YYYY": "%Y", +119 "YY": "%y", +120 "HH": "%I", +121 "HH12": "%I", +122 "HH24": "%H", +123 "MI": "%M", +124 "SS": "%S", +125 "SSSSS": "%f", +126 "TZH": "%z", +127 } +128 +129 class Tokenizer(tokens.Tokenizer): +130 QUOTES = ["'", '"', '"""', "'''"] +131 COMMENTS = ["--", "#", ("/*", "*/")] +132 IDENTIFIERS = ["`"] +133 STRING_ESCAPES = ["\\"] +134 +135 HEX_STRINGS = [("0x", ""), ("0X", "")] +136 +137 BYTE_STRINGS = [ +138 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B") +139 ] +140 +141 RAW_STRINGS = [ +142 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in 
("r", "R") +143 ] +144 +145 KEYWORDS = { +146 **tokens.Tokenizer.KEYWORDS, +147 "ANY TYPE": TokenType.VARIANT, +148 "BEGIN": TokenType.COMMAND, +149 "BEGIN TRANSACTION": TokenType.BEGIN, +150 "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, +151 "BYTES": TokenType.BINARY, +152 "DECLARE": TokenType.COMMAND, +153 "FLOAT64": TokenType.DOUBLE, +154 "INT64": TokenType.BIGINT, +155 "RECORD": TokenType.STRUCT, +156 "TIMESTAMP": TokenType.TIMESTAMPTZ, +157 "NOT DETERMINISTIC": TokenType.VOLATILE, +158 "UNKNOWN": TokenType.NULL, +159 } +160 KEYWORDS.pop("DIV") +161 +162 class Parser(parser.Parser): +163 PREFIXED_PIVOT_COLUMNS = True +164 +165 LOG_BASE_FIRST = False +166 LOG_DEFAULTS_TO_LN = True +167 +168 FUNCTIONS = { +169 **parser.Parser.FUNCTIONS, +170 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), +171 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), +172 "DATE_TRUNC": lambda args: exp.DateTrunc( +173 unit=exp.Literal.string(str(seq_get(args, 1))), +174 this=seq_get(args, 0), +175 ), +176 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), +177 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), +178 "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)), +179 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( +180 [seq_get(args, 1), seq_get(args, 0)] +181 ), +182 "PARSE_TIMESTAMP": lambda args: format_time_lambda(exp.StrToTime, "bigquery")( +183 [seq_get(args, 1), seq_get(args, 0)] +184 ), +185 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, +186 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( +187 this=seq_get(args, 0), +188 expression=seq_get(args, 1), +189 position=seq_get(args, 2), +190 occurrence=seq_get(args, 3), +191 group=exp.Literal.number(1) +192 if re.compile(str(seq_get(args, 1))).groups == 1 +193 else None, +194 ), +195 "SPLIT": lambda args: exp.Split( +196 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split +197 this=seq_get(args, 0), +198 expression=seq_get(args, 1) or exp.Literal.string(","), +199 ), +200 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), +201 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), +202 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), +203 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), +204 } +205 +206 FUNCTION_PARSERS = { +207 **parser.Parser.FUNCTION_PARSERS, +208 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), +209 } +210 FUNCTION_PARSERS.pop("TRIM") +211 +212 NO_PAREN_FUNCTIONS = { +213 **parser.Parser.NO_PAREN_FUNCTIONS, +214 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, +215 } +216 +217 NESTED_TYPE_TOKENS = { +218 *parser.Parser.NESTED_TYPE_TOKENS, +219 TokenType.TABLE, +220 } +221 +222 ID_VAR_TOKENS = { +223 *parser.Parser.ID_VAR_TOKENS, +224 TokenType.VALUES, +225 } +226 +227 PROPERTY_PARSERS = { +228 **parser.Parser.PROPERTY_PARSERS, +229 "NOT DETERMINISTIC": lambda self: self.expression( +230 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") +231 ), +232 "OPTIONS": lambda self: self._parse_with_property(), +233 } +234 +235 CONSTRAINT_PARSERS = { +236 **parser.Parser.CONSTRAINT_PARSERS, +237 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), +238 } +239 +240 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: +241 this = super()._parse_table_part(schema=schema) +242 +243 # 
https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names +244 if isinstance(this, exp.Identifier): +245 table_name = this.name +246 while self._match(TokenType.DASH, advance=False) and self._next: +247 self._advance(2) +248 table_name += f"-{self._prev.text}" +249 +250 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) +251 +252 return this +253 +254 def _parse_table_parts(self, schema: bool = False) -> exp.Table: +255 table = super()._parse_table_parts(schema=schema) +256 if isinstance(table.this, exp.Identifier) and "." in table.name: +257 catalog, db, this, *rest = ( +258 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) +259 for x in split_num_words(table.name, ".", 3) +260 ) +261 +262 if rest and this: +263 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) +264 +265 table = exp.Table(this=this, db=db, catalog=catalog) +266 +267 return table +268 +269 class Generator(generator.Generator): +270 EXPLICIT_UNION = True +271 INTERVAL_ALLOWS_PLURAL_FORM = False +272 JOIN_HINTS = False +273 TABLE_HINTS = False +274 LIMIT_FETCH = "LIMIT" +275 RENAME_TABLE_WITH_DB = False +276 +277 TRANSFORMS = { +278 **generator.Generator.TRANSFORMS, +279 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), +280 exp.ArraySize: rename_func("ARRAY_LENGTH"), +281 exp.AtTimeZone: lambda self, e: self.func( +282 "TIMESTAMP", self.func("DATETIME", e.this, e.args.get("zone")) +283 ), +284 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), +285 exp.DateAdd: _date_add_sql("DATE", "ADD"), +286 exp.DateSub: _date_add_sql("DATE", "SUB"), +287 exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"), +288 exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"), +289 exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", +290 exp.DateStrToDate: datestrtodate_sql, +291 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), +292 exp.GroupConcat: rename_func("STRING_AGG"), +293 exp.ILike: no_ilike_sql, +294 exp.IntDiv: rename_func("DIV"), +295 exp.Max: max_or_greatest, +296 exp.Min: min_or_least, +297 exp.RegexpExtract: lambda self, e: self.func( +298 "REGEXP_EXTRACT", +299 e.this, +300 e.expression, +301 e.args.get("position"), +302 e.args.get("occurrence"), +303 ), +304 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), +305 exp.Select: transforms.preprocess( +306 [_unqualify_unnest, transforms.eliminate_distinct_on] +307 ), +308 exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", +309 exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})", +310 exp.TimeAdd: _date_add_sql("TIME", "ADD"), +311 exp.TimeSub: _date_add_sql("TIME", "SUB"), +312 exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"), +313 exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"), +314 exp.TimeStrToTime: timestrtotime_sql, +315 exp.TryCast: lambda self, e: f"SAFE_CAST({self.sql(e, 'this')} AS {self.sql(e, 'to')})", +316 exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"), +317 exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"), +318 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", +319 exp.VariancePop: rename_func("VAR_POP"), +320 exp.Values: _derived_table_values_to_unnest, +321 exp.ReturnsProperty: _returnsproperty_sql, +322 exp.Create: _create_sql, +323 exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression), +324 exp.StabilityProperty: lambda 
self, e: f"DETERMINISTIC" +325 if e.name == "IMMUTABLE" +326 else "NOT DETERMINISTIC", +327 } +328 +329 TYPE_MAPPING = { +330 **generator.Generator.TYPE_MAPPING, +331 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", +332 exp.DataType.Type.BIGINT: "INT64", +333 exp.DataType.Type.BINARY: "BYTES", +334 exp.DataType.Type.BOOLEAN: "BOOL", +335 exp.DataType.Type.CHAR: "STRING", +336 exp.DataType.Type.DECIMAL: "NUMERIC", +337 exp.DataType.Type.DOUBLE: "FLOAT64", +338 exp.DataType.Type.FLOAT: "FLOAT64", +339 exp.DataType.Type.INT: "INT64", +340 exp.DataType.Type.NCHAR: "STRING", +341 exp.DataType.Type.NVARCHAR: "STRING", +342 exp.DataType.Type.SMALLINT: "INT64", +343 exp.DataType.Type.TEXT: "STRING", +344 exp.DataType.Type.TIMESTAMP: "DATETIME", +345 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +346 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", +347 exp.DataType.Type.TINYINT: "INT64", +348 exp.DataType.Type.VARBINARY: "BYTES", +349 exp.DataType.Type.VARCHAR: "STRING", +350 exp.DataType.Type.VARIANT: "ANY TYPE", +351 } +352 +353 PROPERTIES_LOCATION = { +354 **generator.Generator.PROPERTIES_LOCATION, +355 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, +356 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +357 } +358 +359 RESERVED_KEYWORDS = {*generator.Generator.RESERVED_KEYWORDS, "hash"} 360 -361 def with_properties(self, properties: exp.Properties) -> str: -362 return self.properties(properties, prefix=self.seg("OPTIONS")) +361 def array_sql(self, expression: exp.Array) -> str: +362 first_arg = seq_get(expression.expressions, 0) +363 if isinstance(first_arg, exp.Subqueryable): +364 return f"ARRAY{self.wrap(self.sql(first_arg))}" +365 +366 return inline_array_sql(self, expression) +367 +368 def transaction_sql(self, *_) -> str: +369 return "BEGIN TRANSACTION" +370 +371 def commit_sql(self, *_) -> str: +372 return "COMMIT TRANSACTION" +373 +374 def rollback_sql(self, *_) -> str: +375 return "ROLLBACK TRANSACTION" +376 +377 def in_unnest_op(self, expression: exp.Unnest) -> str: +378 return self.sql(expression) +379 +380 def except_op(self, expression: exp.Except) -> str: +381 if not expression.args.get("distinct", False): +382 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") +383 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" +384 +385 def intersect_op(self, expression: exp.Intersect) -> str: +386 if not expression.args.get("distinct", False): +387 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") +388 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" +389 +390 def with_properties(self, properties: exp.Properties) -> str: +391 return self.properties(properties, prefix=self.seg("OPTIONS"))
@@ -478,264 +507,292 @@
-106class BigQuery(Dialect):
-107    unnest_column_only = True
-108    time_mapping = {
-109        "%M": "%-M",
-110        "%d": "%-d",
-111        "%m": "%-m",
-112        "%y": "%-y",
-113        "%H": "%-H",
-114        "%I": "%-I",
-115        "%S": "%-S",
-116        "%j": "%-j",
-117    }
-118
-119    class Tokenizer(tokens.Tokenizer):
-120        QUOTES = ["'", '"', '"""', "'''"]
-121        COMMENTS = ["--", "#", ("/*", "*/")]
-122        IDENTIFIERS = ["`"]
-123        STRING_ESCAPES = ["\\"]
-124
-125        HEX_STRINGS = [("0x", ""), ("0X", "")]
-126
-127        BYTE_STRINGS = [
-128            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
-129        ]
-130
-131        RAW_STRINGS = [
-132            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
-133        ]
-134
-135        KEYWORDS = {
-136            **tokens.Tokenizer.KEYWORDS,
-137            "ANY TYPE": TokenType.VARIANT,
-138            "BEGIN": TokenType.COMMAND,
-139            "BEGIN TRANSACTION": TokenType.BEGIN,
-140            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
-141            "BYTES": TokenType.BINARY,
-142            "DECLARE": TokenType.COMMAND,
-143            "FLOAT64": TokenType.DOUBLE,
-144            "INT64": TokenType.BIGINT,
-145            "RECORD": TokenType.STRUCT,
-146            "NOT DETERMINISTIC": TokenType.VOLATILE,
-147            "UNKNOWN": TokenType.NULL,
-148        }
-149        KEYWORDS.pop("DIV")
-150
-151    class Parser(parser.Parser):
-152        PREFIXED_PIVOT_COLUMNS = True
-153
-154        LOG_BASE_FIRST = False
-155        LOG_DEFAULTS_TO_LN = True
-156
-157        FUNCTIONS = {
-158            **parser.Parser.FUNCTIONS,
-159            "DATE_TRUNC": lambda args: exp.DateTrunc(
-160                unit=exp.Literal.string(str(seq_get(args, 1))),
-161                this=seq_get(args, 0),
-162            ),
-163            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
-164            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
-165            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
-166            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
-167            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
-168                this=seq_get(args, 0),
-169                expression=seq_get(args, 1),
-170                position=seq_get(args, 2),
-171                occurrence=seq_get(args, 3),
-172                group=exp.Literal.number(1)
-173                if re.compile(str(seq_get(args, 1))).groups == 1
-174                else None,
-175            ),
-176            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
-177            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
-178            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
-179            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
-180            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
-181            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
-182            "PARSE_TIMESTAMP": lambda args: exp.StrToTime(
-183                this=seq_get(args, 1), format=seq_get(args, 0)
-184            ),
-185        }
-186
-187        FUNCTION_PARSERS = {
-188            **parser.Parser.FUNCTION_PARSERS,
-189            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
-190        }
-191        FUNCTION_PARSERS.pop("TRIM")
-192
-193        NO_PAREN_FUNCTIONS = {
-194            **parser.Parser.NO_PAREN_FUNCTIONS,
-195            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
-196        }
-197
-198        NESTED_TYPE_TOKENS = {
-199            *parser.Parser.NESTED_TYPE_TOKENS,
-200            TokenType.TABLE,
-201        }
-202
-203        ID_VAR_TOKENS = {
-204            *parser.Parser.ID_VAR_TOKENS,
-205            TokenType.VALUES,
-206        }
-207
-208        PROPERTY_PARSERS = {
-209            **parser.Parser.PROPERTY_PARSERS,
-210            "NOT DETERMINISTIC": lambda self: self.expression(
-211                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
-212            ),
-213            "OPTIONS": lambda self: self._parse_with_property(),
-214        }
-215
-216        CONSTRAINT_PARSERS = {
-217            **parser.Parser.CONSTRAINT_PARSERS,
-218            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
-219        }
-220
-221        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
-222            this = super()._parse_table_part(schema=schema)
-223
-224            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
-225            if isinstance(this, exp.Identifier):
-226                table_name = this.name
-227                while self._match(TokenType.DASH, advance=False) and self._next:
-228                    self._advance(2)
-229                    table_name += f"-{self._prev.text}"
-230
-231                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
-232
-233            return this
-234
-235        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
-236            table = super()._parse_table_parts(schema=schema)
-237            if isinstance(table.this, exp.Identifier) and "." in table.name:
-238                catalog, db, this, *rest = (
-239                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
-240                    for x in split_num_words(table.name, ".", 3)
-241                )
-242
-243                if rest and this:
-244                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
-245
-246                table = exp.Table(this=this, db=db, catalog=catalog)
-247
-248            return table
-249
-250    class Generator(generator.Generator):
-251        EXPLICIT_UNION = True
-252        INTERVAL_ALLOWS_PLURAL_FORM = False
-253        JOIN_HINTS = False
-254        TABLE_HINTS = False
-255        LIMIT_FETCH = "LIMIT"
-256        RENAME_TABLE_WITH_DB = False
-257
-258        TRANSFORMS = {
-259            **generator.Generator.TRANSFORMS,
-260            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
-261            exp.ArraySize: rename_func("ARRAY_LENGTH"),
-262            exp.AtTimeZone: lambda self, e: self.func(
-263                "TIMESTAMP", self.func("DATETIME", e.this, e.args.get("zone"))
-264            ),
-265            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
-266            exp.DateAdd: _date_add_sql("DATE", "ADD"),
-267            exp.DateSub: _date_add_sql("DATE", "SUB"),
-268            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
-269            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
-270            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
-271            exp.DateStrToDate: datestrtodate_sql,
-272            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
-273            exp.GroupConcat: rename_func("STRING_AGG"),
-274            exp.ILike: no_ilike_sql,
-275            exp.IntDiv: rename_func("DIV"),
-276            exp.Max: max_or_greatest,
-277            exp.Min: min_or_least,
-278            exp.Select: transforms.preprocess(
-279                [_unqualify_unnest, transforms.eliminate_distinct_on]
-280            ),
-281            exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})",
-282            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
-283            exp.TimeSub: _date_add_sql("TIME", "SUB"),
-284            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
-285            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
-286            exp.TimeStrToTime: timestrtotime_sql,
-287            exp.TryCast: lambda self, e: f"SAFE_CAST({self.sql(e, 'this')} AS {self.sql(e, 'to')})",
-288            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
-289            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
-290            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
-291            exp.VariancePop: rename_func("VAR_POP"),
-292            exp.Values: _derived_table_values_to_unnest,
-293            exp.ReturnsProperty: _returnsproperty_sql,
-294            exp.Create: _create_sql,
-295            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
-296            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
-297            if e.name == "IMMUTABLE"
-298            else "NOT DETERMINISTIC",
-299            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
-300        }
-301
-302        TYPE_MAPPING = {
-303            **generator.Generator.TYPE_MAPPING,
-304            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
-305            exp.DataType.Type.BIGINT: "INT64",
-306            exp.DataType.Type.BINARY: "BYTES",
-307            exp.DataType.Type.BOOLEAN: "BOOL",
-308            exp.DataType.Type.CHAR: "STRING",
-309            exp.DataType.Type.DECIMAL: "NUMERIC",
-310            exp.DataType.Type.DOUBLE: "FLOAT64",
-311            exp.DataType.Type.FLOAT: "FLOAT64",
-312            exp.DataType.Type.INT: "INT64",
-313            exp.DataType.Type.NCHAR: "STRING",
-314            exp.DataType.Type.NVARCHAR: "STRING",
-315            exp.DataType.Type.SMALLINT: "INT64",
-316            exp.DataType.Type.TEXT: "STRING",
-317            exp.DataType.Type.TIMESTAMP: "DATETIME",
-318            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
-319            exp.DataType.Type.TINYINT: "INT64",
-320            exp.DataType.Type.VARBINARY: "BYTES",
-321            exp.DataType.Type.VARCHAR: "STRING",
-322            exp.DataType.Type.VARIANT: "ANY TYPE",
-323        }
-324
-325        PROPERTIES_LOCATION = {
-326            **generator.Generator.PROPERTIES_LOCATION,
-327            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
-328            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
-329        }
-330
-331        RESERVED_KEYWORDS = {*generator.Generator.RESERVED_KEYWORDS, "hash"}
-332
-333        def array_sql(self, expression: exp.Array) -> str:
-334            first_arg = seq_get(expression.expressions, 0)
-335            if isinstance(first_arg, exp.Subqueryable):
-336                return f"ARRAY{self.wrap(self.sql(first_arg))}"
-337
-338            return inline_array_sql(self, expression)
-339
-340        def transaction_sql(self, *_) -> str:
-341            return "BEGIN TRANSACTION"
-342
-343        def commit_sql(self, *_) -> str:
-344            return "COMMIT TRANSACTION"
-345
-346        def rollback_sql(self, *_) -> str:
-347            return "ROLLBACK TRANSACTION"
-348
-349        def in_unnest_op(self, expression: exp.Unnest) -> str:
-350            return self.sql(expression)
-351
-352        def except_op(self, expression: exp.Except) -> str:
-353            if not expression.args.get("distinct", False):
-354                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
-355            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
-356
-357        def intersect_op(self, expression: exp.Intersect) -> str:
-358            if not expression.args.get("distinct", False):
-359                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
-360            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+107class BigQuery(Dialect):
+108    UNNEST_COLUMN_ONLY = True
+109
+110    TIME_MAPPING = {
+111        "%D": "%m/%d/%y",
+112    }
+113
+114    FORMAT_MAPPING = {
+115        "DD": "%d",
+116        "MM": "%m",
+117        "MON": "%b",
+118        "MONTH": "%B",
+119        "YYYY": "%Y",
+120        "YY": "%y",
+121        "HH": "%I",
+122        "HH12": "%I",
+123        "HH24": "%H",
+124        "MI": "%M",
+125        "SS": "%S",
+126        "SSSSS": "%f",
+127        "TZH": "%z",
+128    }
+129
+130    class Tokenizer(tokens.Tokenizer):
+131        QUOTES = ["'", '"', '"""', "'''"]
+132        COMMENTS = ["--", "#", ("/*", "*/")]
+133        IDENTIFIERS = ["`"]
+134        STRING_ESCAPES = ["\\"]
+135
+136        HEX_STRINGS = [("0x", ""), ("0X", "")]
+137
+138        BYTE_STRINGS = [
+139            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
+140        ]
+141
+142        RAW_STRINGS = [
+143            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
+144        ]
+145
+146        KEYWORDS = {
+147            **tokens.Tokenizer.KEYWORDS,
+148            "ANY TYPE": TokenType.VARIANT,
+149            "BEGIN": TokenType.COMMAND,
+150            "BEGIN TRANSACTION": TokenType.BEGIN,
+151            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
+152            "BYTES": TokenType.BINARY,
+153            "DECLARE": TokenType.COMMAND,
+154            "FLOAT64": TokenType.DOUBLE,
+155            "INT64": TokenType.BIGINT,
+156            "RECORD": TokenType.STRUCT,
+157            "TIMESTAMP": TokenType.TIMESTAMPTZ,
+158            "NOT DETERMINISTIC": TokenType.VOLATILE,
+159            "UNKNOWN": TokenType.NULL,
+160        }
+161        KEYWORDS.pop("DIV")
+162
+163    class Parser(parser.Parser):
+164        PREFIXED_PIVOT_COLUMNS = True
+165
+166        LOG_BASE_FIRST = False
+167        LOG_DEFAULTS_TO_LN = True
+168
+169        FUNCTIONS = {
+170            **parser.Parser.FUNCTIONS,
+171            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
+172            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
+173            "DATE_TRUNC": lambda args: exp.DateTrunc(
+174                unit=exp.Literal.string(str(seq_get(args, 1))),
+175                this=seq_get(args, 0),
+176            ),
+177            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
+178            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
+179            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
+180            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
+181                [seq_get(args, 1), seq_get(args, 0)]
+182            ),
+183            "PARSE_TIMESTAMP": lambda args: format_time_lambda(exp.StrToTime, "bigquery")(
+184                [seq_get(args, 1), seq_get(args, 0)]
+185            ),
+186            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
+187            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
+188                this=seq_get(args, 0),
+189                expression=seq_get(args, 1),
+190                position=seq_get(args, 2),
+191                occurrence=seq_get(args, 3),
+192                group=exp.Literal.number(1)
+193                if re.compile(str(seq_get(args, 1))).groups == 1
+194                else None,
+195            ),
+196            "SPLIT": lambda args: exp.Split(
+197                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
+198                this=seq_get(args, 0),
+199                expression=seq_get(args, 1) or exp.Literal.string(","),
+200            ),
+201            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
+202            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
+203            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
+204            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
+205        }
+206
+207        FUNCTION_PARSERS = {
+208            **parser.Parser.FUNCTION_PARSERS,
+209            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
+210        }
+211        FUNCTION_PARSERS.pop("TRIM")
+212
+213        NO_PAREN_FUNCTIONS = {
+214            **parser.Parser.NO_PAREN_FUNCTIONS,
+215            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
+216        }
+217
+218        NESTED_TYPE_TOKENS = {
+219            *parser.Parser.NESTED_TYPE_TOKENS,
+220            TokenType.TABLE,
+221        }
+222
+223        ID_VAR_TOKENS = {
+224            *parser.Parser.ID_VAR_TOKENS,
+225            TokenType.VALUES,
+226        }
+227
+228        PROPERTY_PARSERS = {
+229            **parser.Parser.PROPERTY_PARSERS,
+230            "NOT DETERMINISTIC": lambda self: self.expression(
+231                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
+232            ),
+233            "OPTIONS": lambda self: self._parse_with_property(),
+234        }
+235
+236        CONSTRAINT_PARSERS = {
+237            **parser.Parser.CONSTRAINT_PARSERS,
+238            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
+239        }
+240
+241        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
+242            this = super()._parse_table_part(schema=schema)
+243
+244            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
+245            if isinstance(this, exp.Identifier):
+246                table_name = this.name
+247                while self._match(TokenType.DASH, advance=False) and self._next:
+248                    self._advance(2)
+249                    table_name += f"-{self._prev.text}"
+250
+251                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
+252
+253            return this
+254
+255        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
+256            table = super()._parse_table_parts(schema=schema)
+257            if isinstance(table.this, exp.Identifier) and "." in table.name:
+258                catalog, db, this, *rest = (
+259                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
+260                    for x in split_num_words(table.name, ".", 3)
+261                )
+262
+263                if rest and this:
+264                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
+265
+266                table = exp.Table(this=this, db=db, catalog=catalog)
+267
+268            return table
+269
+270    class Generator(generator.Generator):
+271        EXPLICIT_UNION = True
+272        INTERVAL_ALLOWS_PLURAL_FORM = False
+273        JOIN_HINTS = False
+274        TABLE_HINTS = False
+275        LIMIT_FETCH = "LIMIT"
+276        RENAME_TABLE_WITH_DB = False
+277
+278        TRANSFORMS = {
+279            **generator.Generator.TRANSFORMS,
+280            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
+281            exp.ArraySize: rename_func("ARRAY_LENGTH"),
+282            exp.AtTimeZone: lambda self, e: self.func(
+283                "TIMESTAMP", self.func("DATETIME", e.this, e.args.get("zone"))
+284            ),
+285            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
+286            exp.DateAdd: _date_add_sql("DATE", "ADD"),
+287            exp.DateSub: _date_add_sql("DATE", "SUB"),
+288            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
+289            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
+290            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
+291            exp.DateStrToDate: datestrtodate_sql,
+292            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
+293            exp.GroupConcat: rename_func("STRING_AGG"),
+294            exp.ILike: no_ilike_sql,
+295            exp.IntDiv: rename_func("DIV"),
+296            exp.Max: max_or_greatest,
+297            exp.Min: min_or_least,
+298            exp.RegexpExtract: lambda self, e: self.func(
+299                "REGEXP_EXTRACT",
+300                e.this,
+301                e.expression,
+302                e.args.get("position"),
+303                e.args.get("occurrence"),
+304            ),
+305            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
+306            exp.Select: transforms.preprocess(
+307                [_unqualify_unnest, transforms.eliminate_distinct_on]
+308            ),
+309            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
+310            exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})",
+311            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
+312            exp.TimeSub: _date_add_sql("TIME", "SUB"),
+313            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
+314            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
+315            exp.TimeStrToTime: timestrtotime_sql,
+316            exp.TryCast: lambda self, e: f"SAFE_CAST({self.sql(e, 'this')} AS {self.sql(e, 'to')})",
+317            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
+318            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
+319            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
+320            exp.VariancePop: rename_func("VAR_POP"),
+321            exp.Values: _derived_table_values_to_unnest,
+322            exp.ReturnsProperty: _returnsproperty_sql,
+323            exp.Create: _create_sql,
+324            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
+325            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
+326            if e.name == "IMMUTABLE"
+327            else "NOT DETERMINISTIC",
+328        }
+329
+330        TYPE_MAPPING = {
+331            **generator.Generator.TYPE_MAPPING,
+332            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
+333            exp.DataType.Type.BIGINT: "INT64",
+334            exp.DataType.Type.BINARY: "BYTES",
+335            exp.DataType.Type.BOOLEAN: "BOOL",
+336            exp.DataType.Type.CHAR: "STRING",
+337            exp.DataType.Type.DECIMAL: "NUMERIC",
+338            exp.DataType.Type.DOUBLE: "FLOAT64",
+339            exp.DataType.Type.FLOAT: "FLOAT64",
+340            exp.DataType.Type.INT: "INT64",
+341            exp.DataType.Type.NCHAR: "STRING",
+342            exp.DataType.Type.NVARCHAR: "STRING",
+343            exp.DataType.Type.SMALLINT: "INT64",
+344            exp.DataType.Type.TEXT: "STRING",
+345            exp.DataType.Type.TIMESTAMP: "DATETIME",
+346            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+347            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
+348            exp.DataType.Type.TINYINT: "INT64",
+349            exp.DataType.Type.VARBINARY: "BYTES",
+350            exp.DataType.Type.VARCHAR: "STRING",
+351            exp.DataType.Type.VARIANT: "ANY TYPE",
+352        }
+353
+354        PROPERTIES_LOCATION = {
+355            **generator.Generator.PROPERTIES_LOCATION,
+356            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
+357            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
+358        }
+359
+360        RESERVED_KEYWORDS = {*generator.Generator.RESERVED_KEYWORDS, "hash"}
 361
-362        def with_properties(self, properties: exp.Properties) -> str:
-363            return self.properties(properties, prefix=self.seg("OPTIONS"))
+362        def array_sql(self, expression: exp.Array) -> str:
+363            first_arg = seq_get(expression.expressions, 0)
+364            if isinstance(first_arg, exp.Subqueryable):
+365                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+366
+367            return inline_array_sql(self, expression)
+368
+369        def transaction_sql(self, *_) -> str:
+370            return "BEGIN TRANSACTION"
+371
+372        def commit_sql(self, *_) -> str:
+373            return "COMMIT TRANSACTION"
+374
+375        def rollback_sql(self, *_) -> str:
+376            return "ROLLBACK TRANSACTION"
+377
+378        def in_unnest_op(self, expression: exp.Unnest) -> str:
+379            return self.sql(expression)
+380
+381        def except_op(self, expression: exp.Except) -> str:
+382            if not expression.args.get("distinct", False):
+383                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
+384            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+385
+386        def intersect_op(self, expression: exp.Intersect) -> str:
+387            if not expression.args.get("distinct", False):
+388                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
+389            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+390
+391        def with_properties(self, properties: exp.Properties) -> str:
+392            return self.properties(properties, prefix=self.seg("OPTIONS"))
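To make the mappings above concrete, here is a minimal sketch using sqlglot's top-level transpile API; the expected outputs in the comments follow from the TRANSFORMS/TYPE_MAPPING entries above and are illustrative rather than verified verbatim for every version.

import sqlglot

# remove_precision_parameterized_types drops the precision, and TYPE_MAPPING
# turns VARCHAR into STRING when targeting BigQuery.
print(sqlglot.transpile("SELECT CAST(x AS VARCHAR(10))", write="bigquery")[0])
# expected: SELECT CAST(x AS STRING)

# GroupConcat is renamed to STRING_AGG per the TRANSFORMS table.
print(sqlglot.transpile("SELECT GROUP_CONCAT(x) FROM t", write="bigquery")[0])
# expected: SELECT STRING_AGG(x) FROM t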
 
@@ -770,37 +827,38 @@
-
119    class Tokenizer(tokens.Tokenizer):
-120        QUOTES = ["'", '"', '"""', "'''"]
-121        COMMENTS = ["--", "#", ("/*", "*/")]
-122        IDENTIFIERS = ["`"]
-123        STRING_ESCAPES = ["\\"]
-124
-125        HEX_STRINGS = [("0x", ""), ("0X", "")]
-126
-127        BYTE_STRINGS = [
-128            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
-129        ]
-130
-131        RAW_STRINGS = [
-132            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
-133        ]
-134
-135        KEYWORDS = {
-136            **tokens.Tokenizer.KEYWORDS,
-137            "ANY TYPE": TokenType.VARIANT,
-138            "BEGIN": TokenType.COMMAND,
-139            "BEGIN TRANSACTION": TokenType.BEGIN,
-140            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
-141            "BYTES": TokenType.BINARY,
-142            "DECLARE": TokenType.COMMAND,
-143            "FLOAT64": TokenType.DOUBLE,
-144            "INT64": TokenType.BIGINT,
-145            "RECORD": TokenType.STRUCT,
-146            "NOT DETERMINISTIC": TokenType.VOLATILE,
-147            "UNKNOWN": TokenType.NULL,
-148        }
-149        KEYWORDS.pop("DIV")
+            
130    class Tokenizer(tokens.Tokenizer):
+131        QUOTES = ["'", '"', '"""', "'''"]
+132        COMMENTS = ["--", "#", ("/*", "*/")]
+133        IDENTIFIERS = ["`"]
+134        STRING_ESCAPES = ["\\"]
+135
+136        HEX_STRINGS = [("0x", ""), ("0X", "")]
+137
+138        BYTE_STRINGS = [
+139            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
+140        ]
+141
+142        RAW_STRINGS = [
+143            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
+144        ]
+145
+146        KEYWORDS = {
+147            **tokens.Tokenizer.KEYWORDS,
+148            "ANY TYPE": TokenType.VARIANT,
+149            "BEGIN": TokenType.COMMAND,
+150            "BEGIN TRANSACTION": TokenType.BEGIN,
+151            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
+152            "BYTES": TokenType.BINARY,
+153            "DECLARE": TokenType.COMMAND,
+154            "FLOAT64": TokenType.DOUBLE,
+155            "INT64": TokenType.BIGINT,
+156            "RECORD": TokenType.STRUCT,
+157            "TIMESTAMP": TokenType.TIMESTAMPTZ,
+158            "NOT DETERMINISTIC": TokenType.VOLATILE,
+159            "UNKNOWN": TokenType.NULL,
+160        }
+161        KEYWORDS.pop("DIV")
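A hedged sketch of what the keyword table above implies for parsing: BigQuery's TIMESTAMP now maps to the timezone-aware TIMESTAMPTZ token, so it is distinguishable from DATETIME in the AST.

import sqlglot
from sqlglot import exp

cast = sqlglot.parse_one("SELECT CAST(x AS TIMESTAMP)", read="bigquery").find(exp.Cast)
# The target type should be the timezone-aware variant (Type.TIMESTAMPTZ), which
# the generator's TYPE_MAPPING later renders back as TIMESTAMP.
print(cast.args["to"].this)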
 
@@ -812,6 +870,7 @@
@@ -828,128 +887,128 @@
-
151    class Parser(parser.Parser):
-152        PREFIXED_PIVOT_COLUMNS = True
-153
-154        LOG_BASE_FIRST = False
-155        LOG_DEFAULTS_TO_LN = True
-156
-157        FUNCTIONS = {
-158            **parser.Parser.FUNCTIONS,
-159            "DATE_TRUNC": lambda args: exp.DateTrunc(
-160                unit=exp.Literal.string(str(seq_get(args, 1))),
-161                this=seq_get(args, 0),
-162            ),
-163            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
-164            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
-165            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
-166            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
-167            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
-168                this=seq_get(args, 0),
-169                expression=seq_get(args, 1),
-170                position=seq_get(args, 2),
-171                occurrence=seq_get(args, 3),
-172                group=exp.Literal.number(1)
-173                if re.compile(str(seq_get(args, 1))).groups == 1
-174                else None,
-175            ),
-176            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
-177            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
-178            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
-179            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
-180            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
-181            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
-182            "PARSE_TIMESTAMP": lambda args: exp.StrToTime(
-183                this=seq_get(args, 1), format=seq_get(args, 0)
-184            ),
-185        }
-186
-187        FUNCTION_PARSERS = {
-188            **parser.Parser.FUNCTION_PARSERS,
-189            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
-190        }
-191        FUNCTION_PARSERS.pop("TRIM")
-192
-193        NO_PAREN_FUNCTIONS = {
-194            **parser.Parser.NO_PAREN_FUNCTIONS,
-195            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
-196        }
-197
-198        NESTED_TYPE_TOKENS = {
-199            *parser.Parser.NESTED_TYPE_TOKENS,
-200            TokenType.TABLE,
-201        }
-202
-203        ID_VAR_TOKENS = {
-204            *parser.Parser.ID_VAR_TOKENS,
-205            TokenType.VALUES,
-206        }
-207
-208        PROPERTY_PARSERS = {
-209            **parser.Parser.PROPERTY_PARSERS,
-210            "NOT DETERMINISTIC": lambda self: self.expression(
-211                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
-212            ),
-213            "OPTIONS": lambda self: self._parse_with_property(),
-214        }
-215
-216        CONSTRAINT_PARSERS = {
-217            **parser.Parser.CONSTRAINT_PARSERS,
-218            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
-219        }
-220
-221        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
-222            this = super()._parse_table_part(schema=schema)
-223
-224            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
-225            if isinstance(this, exp.Identifier):
-226                table_name = this.name
-227                while self._match(TokenType.DASH, advance=False) and self._next:
-228                    self._advance(2)
-229                    table_name += f"-{self._prev.text}"
-230
-231                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
-232
-233            return this
-234
-235        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
-236            table = super()._parse_table_parts(schema=schema)
-237            if isinstance(table.this, exp.Identifier) and "." in table.name:
-238                catalog, db, this, *rest = (
-239                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
-240                    for x in split_num_words(table.name, ".", 3)
-241                )
-242
-243                if rest and this:
-244                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
-245
-246                table = exp.Table(this=this, db=db, catalog=catalog)
-247
-248            return table
+            
163    class Parser(parser.Parser):
+164        PREFIXED_PIVOT_COLUMNS = True
+165
+166        LOG_BASE_FIRST = False
+167        LOG_DEFAULTS_TO_LN = True
+168
+169        FUNCTIONS = {
+170            **parser.Parser.FUNCTIONS,
+171            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
+172            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
+173            "DATE_TRUNC": lambda args: exp.DateTrunc(
+174                unit=exp.Literal.string(str(seq_get(args, 1))),
+175                this=seq_get(args, 0),
+176            ),
+177            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
+178            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
+179            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
+180            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
+181                [seq_get(args, 1), seq_get(args, 0)]
+182            ),
+183            "PARSE_TIMESTAMP": lambda args: format_time_lambda(exp.StrToTime, "bigquery")(
+184                [seq_get(args, 1), seq_get(args, 0)]
+185            ),
+186            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
+187            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
+188                this=seq_get(args, 0),
+189                expression=seq_get(args, 1),
+190                position=seq_get(args, 2),
+191                occurrence=seq_get(args, 3),
+192                group=exp.Literal.number(1)
+193                if re.compile(str(seq_get(args, 1))).groups == 1
+194                else None,
+195            ),
+196            "SPLIT": lambda args: exp.Split(
+197                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
+198                this=seq_get(args, 0),
+199                expression=seq_get(args, 1) or exp.Literal.string(","),
+200            ),
+201            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
+202            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
+203            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
+204            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
+205        }
+206
+207        FUNCTION_PARSERS = {
+208            **parser.Parser.FUNCTION_PARSERS,
+209            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
+210        }
+211        FUNCTION_PARSERS.pop("TRIM")
+212
+213        NO_PAREN_FUNCTIONS = {
+214            **parser.Parser.NO_PAREN_FUNCTIONS,
+215            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
+216        }
+217
+218        NESTED_TYPE_TOKENS = {
+219            *parser.Parser.NESTED_TYPE_TOKENS,
+220            TokenType.TABLE,
+221        }
+222
+223        ID_VAR_TOKENS = {
+224            *parser.Parser.ID_VAR_TOKENS,
+225            TokenType.VALUES,
+226        }
+227
+228        PROPERTY_PARSERS = {
+229            **parser.Parser.PROPERTY_PARSERS,
+230            "NOT DETERMINISTIC": lambda self: self.expression(
+231                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
+232            ),
+233            "OPTIONS": lambda self: self._parse_with_property(),
+234        }
+235
+236        CONSTRAINT_PARSERS = {
+237            **parser.Parser.CONSTRAINT_PARSERS,
+238            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
+239        }
+240
+241        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
+242            this = super()._parse_table_part(schema=schema)
+243
+244            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
+245            if isinstance(this, exp.Identifier):
+246                table_name = this.name
+247                while self._match(TokenType.DASH, advance=False) and self._next:
+248                    self._advance(2)
+249                    table_name += f"-{self._prev.text}"
+250
+251                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
+252
+253            return this
+254
+255        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
+256            table = super()._parse_table_parts(schema=schema)
+257            if isinstance(table.this, exp.Identifier) and "." in table.name:
+258                catalog, db, this, *rest = (
+259                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
+260                    for x in split_num_words(table.name, ".", 3)
+261                )
+262
+263                if rest and this:
+264                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
+265
+266                table = exp.Table(this=this, db=db, catalog=catalog)
+267
+268            return table
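A hedged usage sketch of the two overrides above, assuming sqlglot's public parse_one/find API: dashes are folded back into a single identifier, and a back-quoted project.dataset.table reference is split into catalog, db and table parts.

import sqlglot
from sqlglot import exp

t = sqlglot.parse_one(
    "SELECT * FROM `my-project.my_dataset.my_table`", read="bigquery"
).find(exp.Table)
# expected: my-project my_dataset my_table
print(t.text("catalog"), t.text("db"), t.name)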
 
-Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

+Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  -• error_level: the desired error level.
  +• error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  -• error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
  +• error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  -• index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
  -• alias_post_tablesample: If the table alias comes after tablesample. Default: False
   • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
  -• null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
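As a hedged illustration of these options, parser settings such as error_level and max_errors can be passed through the top-level API, which forwards keyword arguments to the dialect's Parser.

import sqlglot
from sqlglot.errors import ErrorLevel, ParseError

try:
    # Invalid SQL: the parser raises once error_level is RAISE (or IMMEDIATE).
    sqlglot.parse_one("SELECT * FROM", read="bigquery", error_level=ErrorLevel.RAISE)
except ParseError as e:
    print(e)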
@@ -982,166 +1041,160 @@ Default: "nulls_are_small"
-
250    class Generator(generator.Generator):
-251        EXPLICIT_UNION = True
-252        INTERVAL_ALLOWS_PLURAL_FORM = False
-253        JOIN_HINTS = False
-254        TABLE_HINTS = False
-255        LIMIT_FETCH = "LIMIT"
-256        RENAME_TABLE_WITH_DB = False
-257
-258        TRANSFORMS = {
-259            **generator.Generator.TRANSFORMS,
-260            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
-261            exp.ArraySize: rename_func("ARRAY_LENGTH"),
-262            exp.AtTimeZone: lambda self, e: self.func(
-263                "TIMESTAMP", self.func("DATETIME", e.this, e.args.get("zone"))
-264            ),
-265            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
-266            exp.DateAdd: _date_add_sql("DATE", "ADD"),
-267            exp.DateSub: _date_add_sql("DATE", "SUB"),
-268            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
-269            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
-270            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
-271            exp.DateStrToDate: datestrtodate_sql,
-272            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
-273            exp.GroupConcat: rename_func("STRING_AGG"),
-274            exp.ILike: no_ilike_sql,
-275            exp.IntDiv: rename_func("DIV"),
-276            exp.Max: max_or_greatest,
-277            exp.Min: min_or_least,
-278            exp.Select: transforms.preprocess(
-279                [_unqualify_unnest, transforms.eliminate_distinct_on]
-280            ),
-281            exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})",
-282            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
-283            exp.TimeSub: _date_add_sql("TIME", "SUB"),
-284            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
-285            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
-286            exp.TimeStrToTime: timestrtotime_sql,
-287            exp.TryCast: lambda self, e: f"SAFE_CAST({self.sql(e, 'this')} AS {self.sql(e, 'to')})",
-288            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
-289            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
-290            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
-291            exp.VariancePop: rename_func("VAR_POP"),
-292            exp.Values: _derived_table_values_to_unnest,
-293            exp.ReturnsProperty: _returnsproperty_sql,
-294            exp.Create: _create_sql,
-295            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
-296            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
-297            if e.name == "IMMUTABLE"
-298            else "NOT DETERMINISTIC",
-299            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
-300        }
-301
-302        TYPE_MAPPING = {
-303            **generator.Generator.TYPE_MAPPING,
-304            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
-305            exp.DataType.Type.BIGINT: "INT64",
-306            exp.DataType.Type.BINARY: "BYTES",
-307            exp.DataType.Type.BOOLEAN: "BOOL",
-308            exp.DataType.Type.CHAR: "STRING",
-309            exp.DataType.Type.DECIMAL: "NUMERIC",
-310            exp.DataType.Type.DOUBLE: "FLOAT64",
-311            exp.DataType.Type.FLOAT: "FLOAT64",
-312            exp.DataType.Type.INT: "INT64",
-313            exp.DataType.Type.NCHAR: "STRING",
-314            exp.DataType.Type.NVARCHAR: "STRING",
-315            exp.DataType.Type.SMALLINT: "INT64",
-316            exp.DataType.Type.TEXT: "STRING",
-317            exp.DataType.Type.TIMESTAMP: "DATETIME",
-318            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
-319            exp.DataType.Type.TINYINT: "INT64",
-320            exp.DataType.Type.VARBINARY: "BYTES",
-321            exp.DataType.Type.VARCHAR: "STRING",
-322            exp.DataType.Type.VARIANT: "ANY TYPE",
-323        }
-324
-325        PROPERTIES_LOCATION = {
-326            **generator.Generator.PROPERTIES_LOCATION,
-327            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
-328            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
-329        }
-330
-331        RESERVED_KEYWORDS = {*generator.Generator.RESERVED_KEYWORDS, "hash"}
-332
-333        def array_sql(self, expression: exp.Array) -> str:
-334            first_arg = seq_get(expression.expressions, 0)
-335            if isinstance(first_arg, exp.Subqueryable):
-336                return f"ARRAY{self.wrap(self.sql(first_arg))}"
-337
-338            return inline_array_sql(self, expression)
-339
-340        def transaction_sql(self, *_) -> str:
-341            return "BEGIN TRANSACTION"
-342
-343        def commit_sql(self, *_) -> str:
-344            return "COMMIT TRANSACTION"
-345
-346        def rollback_sql(self, *_) -> str:
-347            return "ROLLBACK TRANSACTION"
-348
-349        def in_unnest_op(self, expression: exp.Unnest) -> str:
-350            return self.sql(expression)
-351
-352        def except_op(self, expression: exp.Except) -> str:
-353            if not expression.args.get("distinct", False):
-354                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
-355            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
-356
-357        def intersect_op(self, expression: exp.Intersect) -> str:
-358            if not expression.args.get("distinct", False):
-359                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
-360            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+            
270    class Generator(generator.Generator):
+271        EXPLICIT_UNION = True
+272        INTERVAL_ALLOWS_PLURAL_FORM = False
+273        JOIN_HINTS = False
+274        TABLE_HINTS = False
+275        LIMIT_FETCH = "LIMIT"
+276        RENAME_TABLE_WITH_DB = False
+277
+278        TRANSFORMS = {
+279            **generator.Generator.TRANSFORMS,
+280            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
+281            exp.ArraySize: rename_func("ARRAY_LENGTH"),
+282            exp.AtTimeZone: lambda self, e: self.func(
+283                "TIMESTAMP", self.func("DATETIME", e.this, e.args.get("zone"))
+284            ),
+285            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
+286            exp.DateAdd: _date_add_sql("DATE", "ADD"),
+287            exp.DateSub: _date_add_sql("DATE", "SUB"),
+288            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
+289            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
+290            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
+291            exp.DateStrToDate: datestrtodate_sql,
+292            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
+293            exp.GroupConcat: rename_func("STRING_AGG"),
+294            exp.ILike: no_ilike_sql,
+295            exp.IntDiv: rename_func("DIV"),
+296            exp.Max: max_or_greatest,
+297            exp.Min: min_or_least,
+298            exp.RegexpExtract: lambda self, e: self.func(
+299                "REGEXP_EXTRACT",
+300                e.this,
+301                e.expression,
+302                e.args.get("position"),
+303                e.args.get("occurrence"),
+304            ),
+305            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
+306            exp.Select: transforms.preprocess(
+307                [_unqualify_unnest, transforms.eliminate_distinct_on]
+308            ),
+309            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
+310            exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})",
+311            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
+312            exp.TimeSub: _date_add_sql("TIME", "SUB"),
+313            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
+314            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
+315            exp.TimeStrToTime: timestrtotime_sql,
+316            exp.TryCast: lambda self, e: f"SAFE_CAST({self.sql(e, 'this')} AS {self.sql(e, 'to')})",
+317            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
+318            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
+319            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
+320            exp.VariancePop: rename_func("VAR_POP"),
+321            exp.Values: _derived_table_values_to_unnest,
+322            exp.ReturnsProperty: _returnsproperty_sql,
+323            exp.Create: _create_sql,
+324            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
+325            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
+326            if e.name == "IMMUTABLE"
+327            else "NOT DETERMINISTIC",
+328        }
+329
+330        TYPE_MAPPING = {
+331            **generator.Generator.TYPE_MAPPING,
+332            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
+333            exp.DataType.Type.BIGINT: "INT64",
+334            exp.DataType.Type.BINARY: "BYTES",
+335            exp.DataType.Type.BOOLEAN: "BOOL",
+336            exp.DataType.Type.CHAR: "STRING",
+337            exp.DataType.Type.DECIMAL: "NUMERIC",
+338            exp.DataType.Type.DOUBLE: "FLOAT64",
+339            exp.DataType.Type.FLOAT: "FLOAT64",
+340            exp.DataType.Type.INT: "INT64",
+341            exp.DataType.Type.NCHAR: "STRING",
+342            exp.DataType.Type.NVARCHAR: "STRING",
+343            exp.DataType.Type.SMALLINT: "INT64",
+344            exp.DataType.Type.TEXT: "STRING",
+345            exp.DataType.Type.TIMESTAMP: "DATETIME",
+346            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+347            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
+348            exp.DataType.Type.TINYINT: "INT64",
+349            exp.DataType.Type.VARBINARY: "BYTES",
+350            exp.DataType.Type.VARCHAR: "STRING",
+351            exp.DataType.Type.VARIANT: "ANY TYPE",
+352        }
+353
+354        PROPERTIES_LOCATION = {
+355            **generator.Generator.PROPERTIES_LOCATION,
+356            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
+357            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
+358        }
+359
+360        RESERVED_KEYWORDS = {*generator.Generator.RESERVED_KEYWORDS, "hash"}
 361
-362        def with_properties(self, properties: exp.Properties) -> str:
-363            return self.properties(properties, prefix=self.seg("OPTIONS"))
+362        def array_sql(self, expression: exp.Array) -> str:
+363            first_arg = seq_get(expression.expressions, 0)
+364            if isinstance(first_arg, exp.Subqueryable):
+365                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+366
+367            return inline_array_sql(self, expression)
+368
+369        def transaction_sql(self, *_) -> str:
+370            return "BEGIN TRANSACTION"
+371
+372        def commit_sql(self, *_) -> str:
+373            return "COMMIT TRANSACTION"
+374
+375        def rollback_sql(self, *_) -> str:
+376            return "ROLLBACK TRANSACTION"
+377
+378        def in_unnest_op(self, expression: exp.Unnest) -> str:
+379            return self.sql(expression)
+380
+381        def except_op(self, expression: exp.Except) -> str:
+382            if not expression.args.get("distinct", False):
+383                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
+384            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+385
+386        def intersect_op(self, expression: exp.Intersect) -> str:
+387            if not expression.args.get("distinct", False):
+388                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
+389            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+390
+391        def with_properties(self, properties: exp.Properties) -> str:
+392            return self.properties(properties, prefix=self.seg("OPTIONS"))
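Before the regenerated docstring below, a short hedged sketch of driving this generator through sqlglot.transpile with two of the documented options (pretty and normalize_functions); the exact formatting of the output may vary.

import sqlglot

print(
    sqlglot.transpile(
        "select group_concat(x) from t where y = 1",
        write="bigquery",
        pretty=True,
        normalize_functions="lower",
    )[0]
)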
 
-Generator interprets the given syntax tree and produces a SQL string as an output.

+Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  -• time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
  -• time_trie (trie): a trie of the time_mapping keys
  -• pretty (bool): if set to True the returned string will be formatted. Default: False.
  -• quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  -• quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  -• identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  -• identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  -• bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  -• bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  -• hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  -• hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  -• byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  -• byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  -• raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  -• raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  -• identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
  -• normalize (bool): if set to True all identifiers will lower cased
  -• string_escape (str): specifies a string escape character. Default: '.
  -• identifier_escape (str): specifies an identifier escape character. Default: ".
  -• pad (int): determines padding in a formatted string. Default: 2.
  -• indent (int): determines the size of indentation in a formatted string. Default: 4.
  -• unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  -• normalize_functions (str): normalize function names, "upper", "lower", or None. Default: "upper"
  -• alias_post_tablesample (bool): if the table alias comes after tablesample. Default: False
  -• identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit. Default: False
  -• unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  -• null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
  -• max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
  -• leading_comma (bool): if the comma is leading or trailing in select statements
  +• pretty: Whether or not to format the produced SQL string. Default: False.
  +• identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  +• normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  +• pad: Determines the pad size in a formatted string. Default: 2.
  +• indent: Determines the indentation size in a formatted string. Default: 2.
  +• normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  +• unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  +• max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  +• leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
   • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
@@ -1164,12 +1217,12 @@ Default: True
-
333        def array_sql(self, expression: exp.Array) -> str:
-334            first_arg = seq_get(expression.expressions, 0)
-335            if isinstance(first_arg, exp.Subqueryable):
-336                return f"ARRAY{self.wrap(self.sql(first_arg))}"
-337
-338            return inline_array_sql(self, expression)
+            
362        def array_sql(self, expression: exp.Array) -> str:
+363            first_arg = seq_get(expression.expressions, 0)
+364            if isinstance(first_arg, exp.Subqueryable):
+365                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+366
+367            return inline_array_sql(self, expression)
 
@@ -1187,8 +1240,8 @@ Default: True
-
340        def transaction_sql(self, *_) -> str:
-341            return "BEGIN TRANSACTION"
+            
369        def transaction_sql(self, *_) -> str:
+370            return "BEGIN TRANSACTION"
 
@@ -1206,8 +1259,8 @@ Default: True
-
343        def commit_sql(self, *_) -> str:
-344            return "COMMIT TRANSACTION"
+            
372        def commit_sql(self, *_) -> str:
+373            return "COMMIT TRANSACTION"
 
@@ -1225,8 +1278,8 @@ Default: True
-
346        def rollback_sql(self, *_) -> str:
-347            return "ROLLBACK TRANSACTION"
+            
375        def rollback_sql(self, *_) -> str:
+376            return "ROLLBACK TRANSACTION"
 
@@ -1244,8 +1297,8 @@ Default: True
-
349        def in_unnest_op(self, expression: exp.Unnest) -> str:
-350            return self.sql(expression)
+            
378        def in_unnest_op(self, expression: exp.Unnest) -> str:
+379            return self.sql(expression)
 
@@ -1263,10 +1316,10 @@ Default: True
-
352        def except_op(self, expression: exp.Except) -> str:
-353            if not expression.args.get("distinct", False):
-354                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
-355            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+            
381        def except_op(self, expression: exp.Except) -> str:
+382            if not expression.args.get("distinct", False):
+383                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
+384            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
 
@@ -1284,10 +1337,10 @@ Default: True
-
357        def intersect_op(self, expression: exp.Intersect) -> str:
-358            if not expression.args.get("distinct", False):
-359                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
-360            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+            
386        def intersect_op(self, expression: exp.Intersect) -> str:
+387            if not expression.args.get("distinct", False):
+388                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
+389            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
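A hedged example of the set-operation handling above: BigQuery requires an explicit DISTINCT or ALL, so a bare EXCEPT is generated as EXCEPT DISTINCT.

import sqlglot

print(sqlglot.transpile("SELECT 1 EXCEPT SELECT 2", write="bigquery")[0])
# expected: SELECT 1 EXCEPT DISTINCT SELECT 2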
 
@@ -1305,8 +1358,8 @@ Default: True
-
362        def with_properties(self, properties: exp.Properties) -> str:
-363            return self.properties(properties, prefix=self.seg("OPTIONS"))
+            
391        def with_properties(self, properties: exp.Properties) -> str:
+392            return self.properties(properties, prefix=self.seg("OPTIONS"))
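A hedged sketch of with_properties above: table properties round-trip through BigQuery's OPTIONS(...) clause (exact whitespace may differ between versions).

import sqlglot

sql = "CREATE TABLE t (x INT64) OPTIONS (description='test')"
print(sqlglot.transpile(sql, read="bigquery", write="bigquery")[0])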
 
@@ -1342,6 +1395,7 @@ Default: True
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
+
createable_sql
create_sql
clone_sql
describe_sql
@@ -1421,10 +1475,12 @@ Default: True
ordered_sql
matchrecognize_sql
query_modifiers
+
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
+
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
@@ -1449,7 +1505,7 @@ Default: True
nextvaluefor_sql
extract_sql
trim_sql
-
concat_sql
+
safeconcat_sql
check_sql
foreignkey_sql
primarykey_sql
@@ -1496,6 +1552,7 @@ Default: True
respectnulls_sql
intdiv_sql
dpipe_sql
+
safedpipe_sql
div_sql
overlaps_sql
distance_sql
@@ -1544,6 +1601,7 @@ Default: True
dictproperty_sql
dictrange_sql
dictsubproperty_sql
+
oncluster_sql
diff --git a/docs/sqlglot/dialects/clickhouse.html b/docs/sqlglot/dialects/clickhouse.html index 2fc171e..e0d93dd 100644 --- a/docs/sqlglot/dialects/clickhouse.html +++ b/docs/sqlglot/dialects/clickhouse.html @@ -48,6 +48,9 @@
  • ClickHouse.Generator
  • @@ -112,313 +121,374 @@ 21 22 23class ClickHouse(Dialect): - 24 normalize_functions = None - 25 null_ordering = "nulls_are_last" - 26 - 27 class Tokenizer(tokens.Tokenizer): - 28 COMMENTS = ["--", "#", "#!", ("/*", "*/")] - 29 IDENTIFIERS = ['"', "`"] - 30 STRING_ESCAPES = ["'", "\\"] - 31 BIT_STRINGS = [("0b", "")] - 32 HEX_STRINGS = [("0x", ""), ("0X", "")] - 33 - 34 KEYWORDS = { - 35 **tokens.Tokenizer.KEYWORDS, - 36 "ATTACH": TokenType.COMMAND, - 37 "DATETIME64": TokenType.DATETIME64, - 38 "DICTIONARY": TokenType.DICTIONARY, - 39 "FINAL": TokenType.FINAL, - 40 "FLOAT32": TokenType.FLOAT, - 41 "FLOAT64": TokenType.DOUBLE, - 42 "GLOBAL": TokenType.GLOBAL, - 43 "INT128": TokenType.INT128, - 44 "INT16": TokenType.SMALLINT, - 45 "INT256": TokenType.INT256, - 46 "INT32": TokenType.INT, - 47 "INT64": TokenType.BIGINT, - 48 "INT8": TokenType.TINYINT, - 49 "MAP": TokenType.MAP, - 50 "TUPLE": TokenType.STRUCT, - 51 "UINT128": TokenType.UINT128, - 52 "UINT16": TokenType.USMALLINT, - 53 "UINT256": TokenType.UINT256, - 54 "UINT32": TokenType.UINT, - 55 "UINT64": TokenType.UBIGINT, - 56 "UINT8": TokenType.UTINYINT, - 57 } - 58 - 59 class Parser(parser.Parser): - 60 FUNCTIONS = { - 61 **parser.Parser.FUNCTIONS, - 62 "ANY": exp.AnyValue.from_arg_list, - 63 "MAP": parse_var_map, - 64 "MATCH": exp.RegexpLike.from_arg_list, - 65 "UNIQ": exp.ApproxDistinct.from_arg_list, - 66 } - 67 - 68 FUNCTIONS_WITH_ALIASED_ARGS = {*parser.Parser.FUNCTIONS_WITH_ALIASED_ARGS, "TUPLE"} - 69 - 70 FUNCTION_PARSERS = { - 71 **parser.Parser.FUNCTION_PARSERS, - 72 "QUANTILE": lambda self: self._parse_quantile(), - 73 } - 74 - 75 FUNCTION_PARSERS.pop("MATCH") - 76 - 77 NO_PAREN_FUNCTION_PARSERS = parser.Parser.NO_PAREN_FUNCTION_PARSERS.copy() - 78 NO_PAREN_FUNCTION_PARSERS.pop(TokenType.ANY) - 79 - 80 RANGE_PARSERS = { - 81 **parser.Parser.RANGE_PARSERS, - 82 TokenType.GLOBAL: lambda self, this: self._match(TokenType.IN) - 83 and self._parse_in(this, is_global=True), - 84 } - 85 - 86 # The PLACEHOLDER entry is popped because 1) it doesn't affect Clickhouse (it corresponds to - 87 # the postgres-specific JSONBContains parser) and 2) it makes parsing the ternary op simpler. 
- 88 COLUMN_OPERATORS = parser.Parser.COLUMN_OPERATORS.copy() - 89 COLUMN_OPERATORS.pop(TokenType.PLACEHOLDER) - 90 - 91 JOIN_KINDS = { - 92 *parser.Parser.JOIN_KINDS, - 93 TokenType.ANY, - 94 TokenType.ASOF, - 95 TokenType.ANTI, - 96 TokenType.SEMI, - 97 } - 98 - 99 TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - { -100 TokenType.ANY, -101 TokenType.SEMI, -102 TokenType.ANTI, -103 TokenType.SETTINGS, -104 TokenType.FORMAT, -105 } -106 -107 LOG_DEFAULTS_TO_LN = True -108 -109 QUERY_MODIFIER_PARSERS = { -110 **parser.Parser.QUERY_MODIFIER_PARSERS, -111 "settings": lambda self: self._parse_csv(self._parse_conjunction) -112 if self._match(TokenType.SETTINGS) -113 else None, -114 "format": lambda self: self._parse_id_var() if self._match(TokenType.FORMAT) else None, -115 } -116 -117 def _parse_conjunction(self) -> t.Optional[exp.Expression]: -118 this = super()._parse_conjunction() -119 -120 if self._match(TokenType.PLACEHOLDER): -121 return self.expression( -122 exp.If, -123 this=this, -124 true=self._parse_conjunction(), -125 false=self._match(TokenType.COLON) and self._parse_conjunction(), -126 ) -127 -128 return this -129 -130 def _parse_placeholder(self) -> t.Optional[exp.Expression]: -131 """ -132 Parse a placeholder expression like SELECT {abc: UInt32} or FROM {table: Identifier} -133 https://clickhouse.com/docs/en/sql-reference/syntax#defining-and-using-query-parameters -134 """ -135 if not self._match(TokenType.L_BRACE): -136 return None -137 -138 this = self._parse_id_var() -139 self._match(TokenType.COLON) -140 kind = self._parse_types(check_func=False) or ( -141 self._match_text_seq("IDENTIFIER") and "Identifier" -142 ) -143 -144 if not kind: -145 self.raise_error("Expecting a placeholder type or 'Identifier' for tables") -146 elif not self._match(TokenType.R_BRACE): -147 self.raise_error("Expecting }") -148 -149 return self.expression(exp.Placeholder, this=this, kind=kind) -150 -151 def _parse_in(self, this: t.Optional[exp.Expression], is_global: bool = False) -> exp.In: -152 this = super()._parse_in(this) -153 this.set("is_global", is_global) -154 return this -155 -156 def _parse_table( -157 self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None -158 ) -> t.Optional[exp.Expression]: -159 this = super()._parse_table(schema=schema, alias_tokens=alias_tokens) -160 -161 if self._match(TokenType.FINAL): -162 this = self.expression(exp.Final, this=this) -163 -164 return this -165 -166 def _parse_position(self, haystack_first: bool = False) -> exp.Expression: -167 return super()._parse_position(haystack_first=True) -168 -169 # https://clickhouse.com/docs/en/sql-reference/statements/select/with/ -170 def _parse_cte(self) -> exp.Expression: -171 index = self._index -172 try: -173 # WITH <identifier> AS <subquery expression> -174 return super()._parse_cte() -175 except ParseError: -176 # WITH <expression> AS <identifier> -177 self._retreat(index) -178 statement = self._parse_statement() -179 -180 if statement and isinstance(statement.this, exp.Alias): -181 self.raise_error("Expected CTE to have alias") -182 -183 return self.expression(exp.CTE, this=statement, alias=statement and statement.this) -184 -185 def _parse_join_parts( -186 self, -187 ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]: -188 is_global = self._match(TokenType.GLOBAL) and self._prev -189 kind_pre = self._match_set(self.JOIN_KINDS, advance=False) and self._prev -190 if kind_pre: -191 kind = self._match_set(self.JOIN_KINDS) and self._prev -192 side = 
self._match_set(self.JOIN_SIDES) and self._prev -193 return is_global, side, kind -194 return ( -195 is_global, -196 self._match_set(self.JOIN_SIDES) and self._prev, -197 self._match_set(self.JOIN_KINDS) and self._prev, -198 ) -199 -200 def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Expression]: -201 join = super()._parse_join(skip_join_token) + 24 NORMALIZE_FUNCTIONS: bool | str = False + 25 NULL_ORDERING = "nulls_are_last" + 26 STRICT_STRING_CONCAT = True + 27 + 28 class Tokenizer(tokens.Tokenizer): + 29 COMMENTS = ["--", "#", "#!", ("/*", "*/")] + 30 IDENTIFIERS = ['"', "`"] + 31 STRING_ESCAPES = ["'", "\\"] + 32 BIT_STRINGS = [("0b", "")] + 33 HEX_STRINGS = [("0x", ""), ("0X", "")] + 34 + 35 KEYWORDS = { + 36 **tokens.Tokenizer.KEYWORDS, + 37 "ATTACH": TokenType.COMMAND, + 38 "DATETIME64": TokenType.DATETIME64, + 39 "DICTIONARY": TokenType.DICTIONARY, + 40 "FINAL": TokenType.FINAL, + 41 "FLOAT32": TokenType.FLOAT, + 42 "FLOAT64": TokenType.DOUBLE, + 43 "GLOBAL": TokenType.GLOBAL, + 44 "INT128": TokenType.INT128, + 45 "INT16": TokenType.SMALLINT, + 46 "INT256": TokenType.INT256, + 47 "INT32": TokenType.INT, + 48 "INT64": TokenType.BIGINT, + 49 "INT8": TokenType.TINYINT, + 50 "MAP": TokenType.MAP, + 51 "TUPLE": TokenType.STRUCT, + 52 "UINT128": TokenType.UINT128, + 53 "UINT16": TokenType.USMALLINT, + 54 "UINT256": TokenType.UINT256, + 55 "UINT32": TokenType.UINT, + 56 "UINT64": TokenType.UBIGINT, + 57 "UINT8": TokenType.UTINYINT, + 58 } + 59 + 60 class Parser(parser.Parser): + 61 FUNCTIONS = { + 62 **parser.Parser.FUNCTIONS, + 63 "ANY": exp.AnyValue.from_arg_list, + 64 "MAP": parse_var_map, + 65 "MATCH": exp.RegexpLike.from_arg_list, + 66 "UNIQ": exp.ApproxDistinct.from_arg_list, + 67 } + 68 + 69 FUNCTIONS_WITH_ALIASED_ARGS = {*parser.Parser.FUNCTIONS_WITH_ALIASED_ARGS, "TUPLE"} + 70 + 71 FUNCTION_PARSERS = { + 72 **parser.Parser.FUNCTION_PARSERS, + 73 "QUANTILE": lambda self: self._parse_quantile(), + 74 } + 75 + 76 FUNCTION_PARSERS.pop("MATCH") + 77 + 78 NO_PAREN_FUNCTION_PARSERS = parser.Parser.NO_PAREN_FUNCTION_PARSERS.copy() + 79 NO_PAREN_FUNCTION_PARSERS.pop(TokenType.ANY) + 80 + 81 RANGE_PARSERS = { + 82 **parser.Parser.RANGE_PARSERS, + 83 TokenType.GLOBAL: lambda self, this: self._match(TokenType.IN) + 84 and self._parse_in(this, is_global=True), + 85 } + 86 + 87 # The PLACEHOLDER entry is popped because 1) it doesn't affect Clickhouse (it corresponds to + 88 # the postgres-specific JSONBContains parser) and 2) it makes parsing the ternary op simpler. 
+ 89 COLUMN_OPERATORS = parser.Parser.COLUMN_OPERATORS.copy() + 90 COLUMN_OPERATORS.pop(TokenType.PLACEHOLDER) + 91 + 92 JOIN_KINDS = { + 93 *parser.Parser.JOIN_KINDS, + 94 TokenType.ANY, + 95 TokenType.ASOF, + 96 TokenType.ANTI, + 97 TokenType.SEMI, + 98 } + 99 +100 TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - { +101 TokenType.ANY, +102 TokenType.SEMI, +103 TokenType.ANTI, +104 TokenType.SETTINGS, +105 TokenType.FORMAT, +106 } +107 +108 LOG_DEFAULTS_TO_LN = True +109 +110 QUERY_MODIFIER_PARSERS = { +111 **parser.Parser.QUERY_MODIFIER_PARSERS, +112 "settings": lambda self: self._parse_csv(self._parse_conjunction) +113 if self._match(TokenType.SETTINGS) +114 else None, +115 "format": lambda self: self._parse_id_var() if self._match(TokenType.FORMAT) else None, +116 } +117 +118 def _parse_conjunction(self) -> t.Optional[exp.Expression]: +119 this = super()._parse_conjunction() +120 +121 if self._match(TokenType.PLACEHOLDER): +122 return self.expression( +123 exp.If, +124 this=this, +125 true=self._parse_conjunction(), +126 false=self._match(TokenType.COLON) and self._parse_conjunction(), +127 ) +128 +129 return this +130 +131 def _parse_placeholder(self) -> t.Optional[exp.Expression]: +132 """ +133 Parse a placeholder expression like SELECT {abc: UInt32} or FROM {table: Identifier} +134 https://clickhouse.com/docs/en/sql-reference/syntax#defining-and-using-query-parameters +135 """ +136 if not self._match(TokenType.L_BRACE): +137 return None +138 +139 this = self._parse_id_var() +140 self._match(TokenType.COLON) +141 kind = self._parse_types(check_func=False) or ( +142 self._match_text_seq("IDENTIFIER") and "Identifier" +143 ) +144 +145 if not kind: +146 self.raise_error("Expecting a placeholder type or 'Identifier' for tables") +147 elif not self._match(TokenType.R_BRACE): +148 self.raise_error("Expecting }") +149 +150 return self.expression(exp.Placeholder, this=this, kind=kind) +151 +152 def _parse_in(self, this: t.Optional[exp.Expression], is_global: bool = False) -> exp.In: +153 this = super()._parse_in(this) +154 this.set("is_global", is_global) +155 return this +156 +157 def _parse_table( +158 self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None +159 ) -> t.Optional[exp.Expression]: +160 this = super()._parse_table(schema=schema, alias_tokens=alias_tokens) +161 +162 if self._match(TokenType.FINAL): +163 this = self.expression(exp.Final, this=this) +164 +165 return this +166 +167 def _parse_position(self, haystack_first: bool = False) -> exp.StrPosition: +168 return super()._parse_position(haystack_first=True) +169 +170 # https://clickhouse.com/docs/en/sql-reference/statements/select/with/ +171 def _parse_cte(self) -> exp.CTE: +172 index = self._index +173 try: +174 # WITH <identifier> AS <subquery expression> +175 return super()._parse_cte() +176 except ParseError: +177 # WITH <expression> AS <identifier> +178 self._retreat(index) +179 statement = self._parse_statement() +180 +181 if statement and isinstance(statement.this, exp.Alias): +182 self.raise_error("Expected CTE to have alias") +183 +184 return self.expression(exp.CTE, this=statement, alias=statement and statement.this) +185 +186 def _parse_join_parts( +187 self, +188 ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]: +189 is_global = self._match(TokenType.GLOBAL) and self._prev +190 kind_pre = self._match_set(self.JOIN_KINDS, advance=False) and self._prev +191 +192 if kind_pre: +193 kind = self._match_set(self.JOIN_KINDS) and self._prev +194 side = 
    @@ -435,313 +505,374 @@
     24class ClickHouse(Dialect):
    - 25    normalize_functions = None
    - 26    null_ordering = "nulls_are_last"
    - 27
    - 28    class Tokenizer(tokens.Tokenizer):
    - 29        COMMENTS = ["--", "#", "#!", ("/*", "*/")]
    - 30        IDENTIFIERS = ['"', "`"]
    - 31        STRING_ESCAPES = ["'", "\\"]
    - 32        BIT_STRINGS = [("0b", "")]
    - 33        HEX_STRINGS = [("0x", ""), ("0X", "")]
    - 34
    - 35        KEYWORDS = {
    - 36            **tokens.Tokenizer.KEYWORDS,
    - 37            "ATTACH": TokenType.COMMAND,
    - 38            "DATETIME64": TokenType.DATETIME64,
    - 39            "DICTIONARY": TokenType.DICTIONARY,
    - 40            "FINAL": TokenType.FINAL,
    - 41            "FLOAT32": TokenType.FLOAT,
    - 42            "FLOAT64": TokenType.DOUBLE,
    - 43            "GLOBAL": TokenType.GLOBAL,
    - 44            "INT128": TokenType.INT128,
    - 45            "INT16": TokenType.SMALLINT,
    - 46            "INT256": TokenType.INT256,
    - 47            "INT32": TokenType.INT,
    - 48            "INT64": TokenType.BIGINT,
    - 49            "INT8": TokenType.TINYINT,
    - 50            "MAP": TokenType.MAP,
    - 51            "TUPLE": TokenType.STRUCT,
    - 52            "UINT128": TokenType.UINT128,
    - 53            "UINT16": TokenType.USMALLINT,
    - 54            "UINT256": TokenType.UINT256,
    - 55            "UINT32": TokenType.UINT,
    - 56            "UINT64": TokenType.UBIGINT,
    - 57            "UINT8": TokenType.UTINYINT,
    - 58        }
    - 59
    - 60    class Parser(parser.Parser):
    - 61        FUNCTIONS = {
    - 62            **parser.Parser.FUNCTIONS,
    - 63            "ANY": exp.AnyValue.from_arg_list,
    - 64            "MAP": parse_var_map,
    - 65            "MATCH": exp.RegexpLike.from_arg_list,
    - 66            "UNIQ": exp.ApproxDistinct.from_arg_list,
    - 67        }
    - 68
    - 69        FUNCTIONS_WITH_ALIASED_ARGS = {*parser.Parser.FUNCTIONS_WITH_ALIASED_ARGS, "TUPLE"}
    - 70
    - 71        FUNCTION_PARSERS = {
    - 72            **parser.Parser.FUNCTION_PARSERS,
    - 73            "QUANTILE": lambda self: self._parse_quantile(),
    - 74        }
    - 75
    - 76        FUNCTION_PARSERS.pop("MATCH")
    - 77
    - 78        NO_PAREN_FUNCTION_PARSERS = parser.Parser.NO_PAREN_FUNCTION_PARSERS.copy()
    - 79        NO_PAREN_FUNCTION_PARSERS.pop(TokenType.ANY)
    - 80
    - 81        RANGE_PARSERS = {
    - 82            **parser.Parser.RANGE_PARSERS,
    - 83            TokenType.GLOBAL: lambda self, this: self._match(TokenType.IN)
    - 84            and self._parse_in(this, is_global=True),
    - 85        }
    - 86
    - 87        # The PLACEHOLDER entry is popped because 1) it doesn't affect Clickhouse (it corresponds to
    - 88        # the postgres-specific JSONBContains parser) and 2) it makes parsing the ternary op simpler.
    - 89        COLUMN_OPERATORS = parser.Parser.COLUMN_OPERATORS.copy()
    - 90        COLUMN_OPERATORS.pop(TokenType.PLACEHOLDER)
    - 91
    - 92        JOIN_KINDS = {
    - 93            *parser.Parser.JOIN_KINDS,
    - 94            TokenType.ANY,
    - 95            TokenType.ASOF,
    - 96            TokenType.ANTI,
    - 97            TokenType.SEMI,
    - 98        }
    - 99
    -100        TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {
    -101            TokenType.ANY,
    -102            TokenType.SEMI,
    -103            TokenType.ANTI,
    -104            TokenType.SETTINGS,
    -105            TokenType.FORMAT,
    -106        }
    -107
    -108        LOG_DEFAULTS_TO_LN = True
    -109
    -110        QUERY_MODIFIER_PARSERS = {
    -111            **parser.Parser.QUERY_MODIFIER_PARSERS,
    -112            "settings": lambda self: self._parse_csv(self._parse_conjunction)
    -113            if self._match(TokenType.SETTINGS)
    -114            else None,
    -115            "format": lambda self: self._parse_id_var() if self._match(TokenType.FORMAT) else None,
    -116        }
    -117
    -118        def _parse_conjunction(self) -> t.Optional[exp.Expression]:
    -119            this = super()._parse_conjunction()
    -120
    -121            if self._match(TokenType.PLACEHOLDER):
    -122                return self.expression(
    -123                    exp.If,
    -124                    this=this,
    -125                    true=self._parse_conjunction(),
    -126                    false=self._match(TokenType.COLON) and self._parse_conjunction(),
    -127                )
    -128
    -129            return this
    -130
    -131        def _parse_placeholder(self) -> t.Optional[exp.Expression]:
    -132            """
    -133            Parse a placeholder expression like SELECT {abc: UInt32} or FROM {table: Identifier}
    -134            https://clickhouse.com/docs/en/sql-reference/syntax#defining-and-using-query-parameters
    -135            """
    -136            if not self._match(TokenType.L_BRACE):
    -137                return None
    -138
    -139            this = self._parse_id_var()
    -140            self._match(TokenType.COLON)
    -141            kind = self._parse_types(check_func=False) or (
    -142                self._match_text_seq("IDENTIFIER") and "Identifier"
    -143            )
    -144
    -145            if not kind:
    -146                self.raise_error("Expecting a placeholder type or 'Identifier' for tables")
    -147            elif not self._match(TokenType.R_BRACE):
    -148                self.raise_error("Expecting }")
    -149
    -150            return self.expression(exp.Placeholder, this=this, kind=kind)
    -151
    -152        def _parse_in(self, this: t.Optional[exp.Expression], is_global: bool = False) -> exp.In:
    -153            this = super()._parse_in(this)
    -154            this.set("is_global", is_global)
    -155            return this
    -156
    -157        def _parse_table(
    -158            self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
    -159        ) -> t.Optional[exp.Expression]:
    -160            this = super()._parse_table(schema=schema, alias_tokens=alias_tokens)
    -161
    -162            if self._match(TokenType.FINAL):
    -163                this = self.expression(exp.Final, this=this)
    -164
    -165            return this
    -166
    -167        def _parse_position(self, haystack_first: bool = False) -> exp.Expression:
    -168            return super()._parse_position(haystack_first=True)
    -169
    -170        # https://clickhouse.com/docs/en/sql-reference/statements/select/with/
    -171        def _parse_cte(self) -> exp.Expression:
    -172            index = self._index
    -173            try:
    -174                # WITH <identifier> AS <subquery expression>
    -175                return super()._parse_cte()
    -176            except ParseError:
    -177                # WITH <expression> AS <identifier>
    -178                self._retreat(index)
    -179                statement = self._parse_statement()
    -180
    -181                if statement and isinstance(statement.this, exp.Alias):
    -182                    self.raise_error("Expected CTE to have alias")
    -183
    -184                return self.expression(exp.CTE, this=statement, alias=statement and statement.this)
    -185
    -186        def _parse_join_parts(
    -187            self,
    -188        ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
    -189            is_global = self._match(TokenType.GLOBAL) and self._prev
    -190            kind_pre = self._match_set(self.JOIN_KINDS, advance=False) and self._prev
    -191            if kind_pre:
    -192                kind = self._match_set(self.JOIN_KINDS) and self._prev
    -193                side = self._match_set(self.JOIN_SIDES) and self._prev
    -194                return is_global, side, kind
    -195            return (
    -196                is_global,
    -197                self._match_set(self.JOIN_SIDES) and self._prev,
    -198                self._match_set(self.JOIN_KINDS) and self._prev,
    -199            )
    -200
    -201        def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Expression]:
    -202            join = super()._parse_join(skip_join_token)
    + 25    NORMALIZE_FUNCTIONS: bool | str = False
    + 26    NULL_ORDERING = "nulls_are_last"
    + 27    STRICT_STRING_CONCAT = True
    + 28
    + 29    class Tokenizer(tokens.Tokenizer):
    + 30        COMMENTS = ["--", "#", "#!", ("/*", "*/")]
    + 31        IDENTIFIERS = ['"', "`"]
    + 32        STRING_ESCAPES = ["'", "\\"]
    + 33        BIT_STRINGS = [("0b", "")]
    + 34        HEX_STRINGS = [("0x", ""), ("0X", "")]
    + 35
    + 36        KEYWORDS = {
    + 37            **tokens.Tokenizer.KEYWORDS,
    + 38            "ATTACH": TokenType.COMMAND,
    + 39            "DATETIME64": TokenType.DATETIME64,
    + 40            "DICTIONARY": TokenType.DICTIONARY,
    + 41            "FINAL": TokenType.FINAL,
    + 42            "FLOAT32": TokenType.FLOAT,
    + 43            "FLOAT64": TokenType.DOUBLE,
    + 44            "GLOBAL": TokenType.GLOBAL,
    + 45            "INT128": TokenType.INT128,
    + 46            "INT16": TokenType.SMALLINT,
    + 47            "INT256": TokenType.INT256,
    + 48            "INT32": TokenType.INT,
    + 49            "INT64": TokenType.BIGINT,
    + 50            "INT8": TokenType.TINYINT,
    + 51            "MAP": TokenType.MAP,
    + 52            "TUPLE": TokenType.STRUCT,
    + 53            "UINT128": TokenType.UINT128,
    + 54            "UINT16": TokenType.USMALLINT,
    + 55            "UINT256": TokenType.UINT256,
    + 56            "UINT32": TokenType.UINT,
    + 57            "UINT64": TokenType.UBIGINT,
    + 58            "UINT8": TokenType.UTINYINT,
    + 59        }
    + 60
    + 61    class Parser(parser.Parser):
    + 62        FUNCTIONS = {
    + 63            **parser.Parser.FUNCTIONS,
    + 64            "ANY": exp.AnyValue.from_arg_list,
    + 65            "MAP": parse_var_map,
    + 66            "MATCH": exp.RegexpLike.from_arg_list,
    + 67            "UNIQ": exp.ApproxDistinct.from_arg_list,
    + 68        }
    + 69
    + 70        FUNCTIONS_WITH_ALIASED_ARGS = {*parser.Parser.FUNCTIONS_WITH_ALIASED_ARGS, "TUPLE"}
    + 71
    + 72        FUNCTION_PARSERS = {
    + 73            **parser.Parser.FUNCTION_PARSERS,
    + 74            "QUANTILE": lambda self: self._parse_quantile(),
    + 75        }
    + 76
    + 77        FUNCTION_PARSERS.pop("MATCH")
    + 78
    + 79        NO_PAREN_FUNCTION_PARSERS = parser.Parser.NO_PAREN_FUNCTION_PARSERS.copy()
    + 80        NO_PAREN_FUNCTION_PARSERS.pop(TokenType.ANY)
    + 81
    + 82        RANGE_PARSERS = {
    + 83            **parser.Parser.RANGE_PARSERS,
    + 84            TokenType.GLOBAL: lambda self, this: self._match(TokenType.IN)
    + 85            and self._parse_in(this, is_global=True),
    + 86        }
    + 87
    + 88        # The PLACEHOLDER entry is popped because 1) it doesn't affect Clickhouse (it corresponds to
    + 89        # the postgres-specific JSONBContains parser) and 2) it makes parsing the ternary op simpler.
    + 90        COLUMN_OPERATORS = parser.Parser.COLUMN_OPERATORS.copy()
    + 91        COLUMN_OPERATORS.pop(TokenType.PLACEHOLDER)
    + 92
    + 93        JOIN_KINDS = {
    + 94            *parser.Parser.JOIN_KINDS,
    + 95            TokenType.ANY,
    + 96            TokenType.ASOF,
    + 97            TokenType.ANTI,
    + 98            TokenType.SEMI,
    + 99        }
    +100
    +101        TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {
    +102            TokenType.ANY,
    +103            TokenType.SEMI,
    +104            TokenType.ANTI,
    +105            TokenType.SETTINGS,
    +106            TokenType.FORMAT,
    +107        }
    +108
    +109        LOG_DEFAULTS_TO_LN = True
    +110
    +111        QUERY_MODIFIER_PARSERS = {
    +112            **parser.Parser.QUERY_MODIFIER_PARSERS,
    +113            "settings": lambda self: self._parse_csv(self._parse_conjunction)
    +114            if self._match(TokenType.SETTINGS)
    +115            else None,
    +116            "format": lambda self: self._parse_id_var() if self._match(TokenType.FORMAT) else None,
    +117        }
    +118
    +119        def _parse_conjunction(self) -> t.Optional[exp.Expression]:
    +120            this = super()._parse_conjunction()
    +121
    +122            if self._match(TokenType.PLACEHOLDER):
    +123                return self.expression(
    +124                    exp.If,
    +125                    this=this,
    +126                    true=self._parse_conjunction(),
    +127                    false=self._match(TokenType.COLON) and self._parse_conjunction(),
    +128                )
    +129
    +130            return this
    +131
    +132        def _parse_placeholder(self) -> t.Optional[exp.Expression]:
    +133            """
    +134            Parse a placeholder expression like SELECT {abc: UInt32} or FROM {table: Identifier}
    +135            https://clickhouse.com/docs/en/sql-reference/syntax#defining-and-using-query-parameters
    +136            """
    +137            if not self._match(TokenType.L_BRACE):
    +138                return None
    +139
    +140            this = self._parse_id_var()
    +141            self._match(TokenType.COLON)
    +142            kind = self._parse_types(check_func=False) or (
    +143                self._match_text_seq("IDENTIFIER") and "Identifier"
    +144            )
    +145
    +146            if not kind:
    +147                self.raise_error("Expecting a placeholder type or 'Identifier' for tables")
    +148            elif not self._match(TokenType.R_BRACE):
    +149                self.raise_error("Expecting }")
    +150
    +151            return self.expression(exp.Placeholder, this=this, kind=kind)
    +152
    +153        def _parse_in(self, this: t.Optional[exp.Expression], is_global: bool = False) -> exp.In:
    +154            this = super()._parse_in(this)
    +155            this.set("is_global", is_global)
    +156            return this
    +157
    +158        def _parse_table(
    +159            self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
    +160        ) -> t.Optional[exp.Expression]:
    +161            this = super()._parse_table(schema=schema, alias_tokens=alias_tokens)
    +162
    +163            if self._match(TokenType.FINAL):
    +164                this = self.expression(exp.Final, this=this)
    +165
    +166            return this
    +167
    +168        def _parse_position(self, haystack_first: bool = False) -> exp.StrPosition:
    +169            return super()._parse_position(haystack_first=True)
    +170
    +171        # https://clickhouse.com/docs/en/sql-reference/statements/select/with/
    +172        def _parse_cte(self) -> exp.CTE:
    +173            index = self._index
    +174            try:
    +175                # WITH <identifier> AS <subquery expression>
    +176                return super()._parse_cte()
    +177            except ParseError:
    +178                # WITH <expression> AS <identifier>
    +179                self._retreat(index)
    +180                statement = self._parse_statement()
    +181
    +182                if statement and isinstance(statement.this, exp.Alias):
    +183                    self.raise_error("Expected CTE to have alias")
    +184
    +185                return self.expression(exp.CTE, this=statement, alias=statement and statement.this)
    +186
    +187        def _parse_join_parts(
    +188            self,
    +189        ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
    +190            is_global = self._match(TokenType.GLOBAL) and self._prev
    +191            kind_pre = self._match_set(self.JOIN_KINDS, advance=False) and self._prev
    +192
    +193            if kind_pre:
    +194                kind = self._match_set(self.JOIN_KINDS) and self._prev
    +195                side = self._match_set(self.JOIN_SIDES) and self._prev
    +196                return is_global, side, kind
    +197
    +198            return (
    +199                is_global,
    +200                self._match_set(self.JOIN_SIDES) and self._prev,
    +201                self._match_set(self.JOIN_KINDS) and self._prev,
    +202            )
     203
    -204            if join:
    -205                join.set("global", join.args.pop("method", None))
    -206            return join
    -207
    -208        def _parse_function(
    -209            self, functions: t.Optional[t.Dict[str, t.Callable]] = None, anonymous: bool = False
    -210        ) -> t.Optional[exp.Expression]:
    -211            func = super()._parse_function(functions, anonymous)
    -212
    -213            if isinstance(func, exp.Anonymous):
    -214                params = self._parse_func_params(func)
    -215
    -216                if params:
    -217                    return self.expression(
    -218                        exp.ParameterizedAgg,
    -219                        this=func.this,
    -220                        expressions=func.expressions,
    -221                        params=params,
    -222                    )
    +204        def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Join]:
    +205            join = super()._parse_join(skip_join_token)
    +206
    +207            if join:
    +208                join.set("global", join.args.pop("method", None))
    +209            return join
    +210
    +211        def _parse_function(
    +212            self,
    +213            functions: t.Optional[t.Dict[str, t.Callable]] = None,
    +214            anonymous: bool = False,
    +215            optional_parens: bool = True,
    +216        ) -> t.Optional[exp.Expression]:
    +217            func = super()._parse_function(
    +218                functions=functions, anonymous=anonymous, optional_parens=optional_parens
    +219            )
    +220
    +221            if isinstance(func, exp.Anonymous):
    +222                params = self._parse_func_params(func)
     223
    -224            return func
    -225
    -226        def _parse_func_params(
    -227            self, this: t.Optional[exp.Func] = None
    -228        ) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
    -229            if self._match_pair(TokenType.R_PAREN, TokenType.L_PAREN):
    -230                return self._parse_csv(self._parse_lambda)
    -231            if self._match(TokenType.L_PAREN):
    -232                params = self._parse_csv(self._parse_lambda)
    -233                self._match_r_paren(this)
    -234                return params
    -235            return None
    -236
    -237        def _parse_quantile(self) -> exp.Quantile:
    -238            this = self._parse_lambda()
    -239            params = self._parse_func_params()
    -240            if params:
    -241                return self.expression(exp.Quantile, this=params[0], quantile=this)
    -242            return self.expression(exp.Quantile, this=this, quantile=exp.Literal.number(0.5))
    -243
    -244        def _parse_wrapped_id_vars(
    -245            self, optional: bool = False
    -246        ) -> t.List[t.Optional[exp.Expression]]:
    -247            return super()._parse_wrapped_id_vars(optional=True)
    -248
    -249        def _parse_primary_key(
    -250            self, wrapped_optional: bool = False, in_props: bool = False
    -251        ) -> exp.Expression:
    -252            return super()._parse_primary_key(
    -253                wrapped_optional=wrapped_optional or in_props, in_props=in_props
    -254            )
    -255
    -256    class Generator(generator.Generator):
    -257        STRUCT_DELIMITER = ("(", ")")
    +224                if params:
    +225                    return self.expression(
    +226                        exp.ParameterizedAgg,
    +227                        this=func.this,
    +228                        expressions=func.expressions,
    +229                        params=params,
    +230                    )
    +231
    +232            return func
    +233
    +234        def _parse_func_params(
    +235            self, this: t.Optional[exp.Func] = None
    +236        ) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
    +237            if self._match_pair(TokenType.R_PAREN, TokenType.L_PAREN):
    +238                return self._parse_csv(self._parse_lambda)
    +239
    +240            if self._match(TokenType.L_PAREN):
    +241                params = self._parse_csv(self._parse_lambda)
    +242                self._match_r_paren(this)
    +243                return params
    +244
    +245            return None
    +246
    +247        def _parse_quantile(self) -> exp.Quantile:
    +248            this = self._parse_lambda()
    +249            params = self._parse_func_params()
    +250            if params:
    +251                return self.expression(exp.Quantile, this=params[0], quantile=this)
    +252            return self.expression(exp.Quantile, this=this, quantile=exp.Literal.number(0.5))
    +253
    +254        def _parse_wrapped_id_vars(
    +255            self, optional: bool = False
    +256        ) -> t.List[t.Optional[exp.Expression]]:
    +257            return super()._parse_wrapped_id_vars(optional=True)
     258
    -259        TYPE_MAPPING = {
    -260            **generator.Generator.TYPE_MAPPING,
    -261            exp.DataType.Type.ARRAY: "Array",
    -262            exp.DataType.Type.BIGINT: "Int64",
    -263            exp.DataType.Type.DATETIME64: "DateTime64",
    -264            exp.DataType.Type.DOUBLE: "Float64",
    -265            exp.DataType.Type.FLOAT: "Float32",
    -266            exp.DataType.Type.INT: "Int32",
    -267            exp.DataType.Type.INT128: "Int128",
    -268            exp.DataType.Type.INT256: "Int256",
    -269            exp.DataType.Type.MAP: "Map",
    -270            exp.DataType.Type.NULLABLE: "Nullable",
    -271            exp.DataType.Type.SMALLINT: "Int16",
    -272            exp.DataType.Type.STRUCT: "Tuple",
    -273            exp.DataType.Type.TINYINT: "Int8",
    -274            exp.DataType.Type.UBIGINT: "UInt64",
    -275            exp.DataType.Type.UINT: "UInt32",
    -276            exp.DataType.Type.UINT128: "UInt128",
    -277            exp.DataType.Type.UINT256: "UInt256",
    -278            exp.DataType.Type.USMALLINT: "UInt16",
    -279            exp.DataType.Type.UTINYINT: "UInt8",
    -280        }
    -281
    -282        TRANSFORMS = {
    -283            **generator.Generator.TRANSFORMS,
    -284            exp.AnyValue: rename_func("any"),
    -285            exp.ApproxDistinct: rename_func("uniq"),
    -286            exp.Array: inline_array_sql,
    -287            exp.CastToStrType: rename_func("CAST"),
    -288            exp.Final: lambda self, e: f"{self.sql(e, 'this')} FINAL",
    -289            exp.Map: lambda self, e: _lower_func(var_map_sql(self, e)),
    -290            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    -291            exp.Pivot: no_pivot_sql,
    -292            exp.Quantile: lambda self, e: self.func("quantile", e.args.get("quantile"))
    -293            + f"({self.sql(e, 'this')})",
    -294            exp.RegexpLike: lambda self, e: f"match({self.format_args(e.this, e.expression)})",
    -295            exp.StrPosition: lambda self, e: f"position({self.format_args(e.this, e.args.get('substr'), e.args.get('position'))})",
    -296            exp.VarMap: lambda self, e: _lower_func(var_map_sql(self, e)),
    -297        }
    -298
    -299        PROPERTIES_LOCATION = {
    -300            **generator.Generator.PROPERTIES_LOCATION,
    -301            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -302            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    -303        }
    -304
    -305        JOIN_HINTS = False
    -306        TABLE_HINTS = False
    -307        EXPLICIT_UNION = True
    -308        GROUPINGS_SEP = ""
    -309
    -310        def cte_sql(self, expression: exp.CTE) -> str:
    -311            if isinstance(expression.this, exp.Alias):
    -312                return self.sql(expression, "this")
    -313
    -314            return super().cte_sql(expression)
    -315
    -316        def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
    -317            return super().after_limit_modifiers(expression) + [
    -318                self.seg("SETTINGS ") + self.expressions(expression, key="settings", flat=True)
    -319                if expression.args.get("settings")
    -320                else "",
    -321                self.seg("FORMAT ") + self.sql(expression, "format")
    -322                if expression.args.get("format")
    -323                else "",
    -324            ]
    +259        def _parse_primary_key(
    +260            self, wrapped_optional: bool = False, in_props: bool = False
    +261        ) -> exp.PrimaryKeyColumnConstraint | exp.PrimaryKey:
    +262            return super()._parse_primary_key(
    +263                wrapped_optional=wrapped_optional or in_props, in_props=in_props
    +264            )
    +265
    +266        def _parse_on_property(self) -> t.Optional[exp.Expression]:
    +267            index = self._index
    +268            if self._match_text_seq("CLUSTER"):
    +269                this = self._parse_id_var()
    +270                if this:
    +271                    return self.expression(exp.OnCluster, this=this)
    +272                else:
    +273                    self._retreat(index)
    +274            return None
    +275
    +276    class Generator(generator.Generator):
    +277        STRUCT_DELIMITER = ("(", ")")
    +278
    +279        TYPE_MAPPING = {
    +280            **generator.Generator.TYPE_MAPPING,
    +281            exp.DataType.Type.ARRAY: "Array",
    +282            exp.DataType.Type.BIGINT: "Int64",
    +283            exp.DataType.Type.DATETIME64: "DateTime64",
    +284            exp.DataType.Type.DOUBLE: "Float64",
    +285            exp.DataType.Type.FLOAT: "Float32",
    +286            exp.DataType.Type.INT: "Int32",
    +287            exp.DataType.Type.INT128: "Int128",
    +288            exp.DataType.Type.INT256: "Int256",
    +289            exp.DataType.Type.MAP: "Map",
    +290            exp.DataType.Type.NULLABLE: "Nullable",
    +291            exp.DataType.Type.SMALLINT: "Int16",
    +292            exp.DataType.Type.STRUCT: "Tuple",
    +293            exp.DataType.Type.TINYINT: "Int8",
    +294            exp.DataType.Type.UBIGINT: "UInt64",
    +295            exp.DataType.Type.UINT: "UInt32",
    +296            exp.DataType.Type.UINT128: "UInt128",
    +297            exp.DataType.Type.UINT256: "UInt256",
    +298            exp.DataType.Type.USMALLINT: "UInt16",
    +299            exp.DataType.Type.UTINYINT: "UInt8",
    +300        }
    +301
    +302        TRANSFORMS = {
    +303            **generator.Generator.TRANSFORMS,
    +304            exp.AnyValue: rename_func("any"),
    +305            exp.ApproxDistinct: rename_func("uniq"),
    +306            exp.Array: inline_array_sql,
    +307            exp.CastToStrType: rename_func("CAST"),
    +308            exp.Final: lambda self, e: f"{self.sql(e, 'this')} FINAL",
    +309            exp.Map: lambda self, e: _lower_func(var_map_sql(self, e)),
    +310            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    +311            exp.Pivot: no_pivot_sql,
    +312            exp.Quantile: lambda self, e: self.func("quantile", e.args.get("quantile"))
    +313            + f"({self.sql(e, 'this')})",
    +314            exp.RegexpLike: lambda self, e: f"match({self.format_args(e.this, e.expression)})",
    +315            exp.StrPosition: lambda self, e: f"position({self.format_args(e.this, e.args.get('substr'), e.args.get('position'))})",
    +316            exp.VarMap: lambda self, e: _lower_func(var_map_sql(self, e)),
    +317        }
    +318
    +319        PROPERTIES_LOCATION = {
    +320            **generator.Generator.PROPERTIES_LOCATION,
    +321            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +322            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    +323            exp.OnCluster: exp.Properties.Location.POST_NAME,
    +324        }
     325
    -326        def parameterizedagg_sql(self, expression: exp.Anonymous) -> str:
    -327            params = self.expressions(expression, "params", flat=True)
    -328            return self.func(expression.name, *expression.expressions) + f"({params})"
    -329
    -330        def placeholder_sql(self, expression: exp.Placeholder) -> str:
    -331            return f"{{{expression.name}: {self.sql(expression, 'kind')}}}"
    +326        JOIN_HINTS = False
    +327        TABLE_HINTS = False
    +328        EXPLICIT_UNION = True
    +329        GROUPINGS_SEP = ""
    +330
    +331        # there's no list in docs, but it can be found in Clickhouse code
    +332        # see `ClickHouse/src/Parsers/ParserCreate*.cpp`
    +333        ON_CLUSTER_TARGETS = {
    +334            "DATABASE",
    +335            "TABLE",
    +336            "VIEW",
    +337            "DICTIONARY",
    +338            "INDEX",
    +339            "FUNCTION",
    +340            "NAMED COLLECTION",
    +341        }
    +342
    +343        def safeconcat_sql(self, expression: exp.SafeConcat) -> str:
    +344            # Clickhouse errors out if we try to cast a NULL value to TEXT
    +345            return self.func(
    +346                "CONCAT",
    +347                *[
    +348                    exp.func("if", e.is_(exp.null()), e, exp.cast(e, "text"))
    +349                    for e in expression.expressions
    +350                ],
    +351            )
    +352
    +353        def cte_sql(self, expression: exp.CTE) -> str:
    +354            if isinstance(expression.this, exp.Alias):
    +355                return self.sql(expression, "this")
    +356
    +357            return super().cte_sql(expression)
    +358
    +359        def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
    +360            return super().after_limit_modifiers(expression) + [
    +361                self.seg("SETTINGS ") + self.expressions(expression, key="settings", flat=True)
    +362                if expression.args.get("settings")
    +363                else "",
    +364                self.seg("FORMAT ") + self.sql(expression, "format")
    +365                if expression.args.get("format")
    +366                else "",
    +367            ]
    +368
    +369        def parameterizedagg_sql(self, expression: exp.Anonymous) -> str:
    +370            params = self.expressions(expression, "params", flat=True)
    +371            return self.func(expression.name, *expression.expressions) + f"({params})"
    +372
    +373        def placeholder_sql(self, expression: exp.Placeholder) -> str:
    +374            return f"{{{expression.name}: {self.sql(expression, 'kind')}}}"
    +375
    +376        def oncluster_sql(self, expression: exp.OnCluster) -> str:
    +377            return f"ON CLUSTER {self.sql(expression, 'this')}"
    +378
    +379        def createable_sql(
    +380            self,
    +381            expression: exp.Create,
    +382            locations: dict[exp.Properties.Location, list[exp.Property]],
    +383        ) -> str:
    +384            kind = self.sql(expression, "kind").upper()
    +385            if kind in self.ON_CLUSTER_TARGETS and locations.get(exp.Properties.Location.POST_NAME):
    +386                this_name = self.sql(expression.this, "this")
    +387                this_properties = " ".join(
    +388                    [self.sql(prop) for prop in locations[exp.Properties.Location.POST_NAME]]
    +389                )
    +390                this_schema = self.schema_columns_sql(expression.this)
    +391                return f"{this_name}{self.sep()}{this_properties}{self.sep()}{this_schema}"
    +392            return super().createable_sql(expression, locations)
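
The Generator changes above route ON CLUSTER through exp.OnCluster and createable_sql, and keep ClickHouse query parameters intact via placeholder_sql. A minimal sketch of exercising these paths through sqlglot's public API (parse_one and transpile are assumed to behave as in other sqlglot releases; the exact rendered SQL may vary by version):

import sqlglot

# ON CLUSTER is parsed into an exp.OnCluster property and re-emitted right after
# the object name by createable_sql for targets listed in ON_CLUSTER_TARGETS.
ddl = "CREATE TABLE t ON CLUSTER my_cluster (x Int32)"
print(sqlglot.transpile(ddl, read="clickhouse", write="clickhouse")[0])

# Query parameters such as {abc: UInt32} round-trip through _parse_placeholder
# and placeholder_sql.
ast = sqlglot.parse_one("SELECT {abc: UInt32}", read="clickhouse")
print(ast.sql(dialect="clickhouse"))
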
     
    @@ -776,37 +907,37 @@
    -
    28    class Tokenizer(tokens.Tokenizer):
    -29        COMMENTS = ["--", "#", "#!", ("/*", "*/")]
    -30        IDENTIFIERS = ['"', "`"]
    -31        STRING_ESCAPES = ["'", "\\"]
    -32        BIT_STRINGS = [("0b", "")]
    -33        HEX_STRINGS = [("0x", ""), ("0X", "")]
    -34
    -35        KEYWORDS = {
    -36            **tokens.Tokenizer.KEYWORDS,
    -37            "ATTACH": TokenType.COMMAND,
    -38            "DATETIME64": TokenType.DATETIME64,
    -39            "DICTIONARY": TokenType.DICTIONARY,
    -40            "FINAL": TokenType.FINAL,
    -41            "FLOAT32": TokenType.FLOAT,
    -42            "FLOAT64": TokenType.DOUBLE,
    -43            "GLOBAL": TokenType.GLOBAL,
    -44            "INT128": TokenType.INT128,
    -45            "INT16": TokenType.SMALLINT,
    -46            "INT256": TokenType.INT256,
    -47            "INT32": TokenType.INT,
    -48            "INT64": TokenType.BIGINT,
    -49            "INT8": TokenType.TINYINT,
    -50            "MAP": TokenType.MAP,
    -51            "TUPLE": TokenType.STRUCT,
    -52            "UINT128": TokenType.UINT128,
    -53            "UINT16": TokenType.USMALLINT,
    -54            "UINT256": TokenType.UINT256,
    -55            "UINT32": TokenType.UINT,
    -56            "UINT64": TokenType.UBIGINT,
    -57            "UINT8": TokenType.UTINYINT,
    -58        }
    +            
    29    class Tokenizer(tokens.Tokenizer):
    +30        COMMENTS = ["--", "#", "#!", ("/*", "*/")]
    +31        IDENTIFIERS = ['"', "`"]
    +32        STRING_ESCAPES = ["'", "\\"]
    +33        BIT_STRINGS = [("0b", "")]
    +34        HEX_STRINGS = [("0x", ""), ("0X", "")]
    +35
    +36        KEYWORDS = {
    +37            **tokens.Tokenizer.KEYWORDS,
    +38            "ATTACH": TokenType.COMMAND,
    +39            "DATETIME64": TokenType.DATETIME64,
    +40            "DICTIONARY": TokenType.DICTIONARY,
    +41            "FINAL": TokenType.FINAL,
    +42            "FLOAT32": TokenType.FLOAT,
    +43            "FLOAT64": TokenType.DOUBLE,
    +44            "GLOBAL": TokenType.GLOBAL,
    +45            "INT128": TokenType.INT128,
    +46            "INT16": TokenType.SMALLINT,
    +47            "INT256": TokenType.INT256,
    +48            "INT32": TokenType.INT,
    +49            "INT64": TokenType.BIGINT,
    +50            "INT8": TokenType.TINYINT,
    +51            "MAP": TokenType.MAP,
    +52            "TUPLE": TokenType.STRUCT,
    +53            "UINT128": TokenType.UINT128,
    +54            "UINT16": TokenType.USMALLINT,
    +55            "UINT256": TokenType.UINT256,
    +56            "UINT32": TokenType.UINT,
    +57            "UINT64": TokenType.UBIGINT,
    +58            "UINT8": TokenType.UTINYINT,
    +59        }
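
The keyword table above maps ClickHouse's sized numeric type names onto sqlglot's generic token types, and the Generator's TYPE_MAPPING spells them back out. A small, hedged illustration of the effect (output depends on the installed sqlglot version):

import sqlglot

# UInt64 tokenizes to the generic UBIGINT type and is rendered as "UInt64" again
# for ClickHouse; other dialects use their own name for the same generic type.
print(sqlglot.transpile("CAST(x AS UInt64)", read="clickhouse", write="clickhouse")[0])
print(sqlglot.transpile("CAST(x AS UInt64)", read="clickhouse", write="duckdb")[0])
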
     
     @@ -818,6 +949,7 @@
     @@ -834,225 +966,236 @@
    -
     60    class Parser(parser.Parser):
    - 61        FUNCTIONS = {
    - 62            **parser.Parser.FUNCTIONS,
    - 63            "ANY": exp.AnyValue.from_arg_list,
    - 64            "MAP": parse_var_map,
    - 65            "MATCH": exp.RegexpLike.from_arg_list,
    - 66            "UNIQ": exp.ApproxDistinct.from_arg_list,
    - 67        }
    - 68
    - 69        FUNCTIONS_WITH_ALIASED_ARGS = {*parser.Parser.FUNCTIONS_WITH_ALIASED_ARGS, "TUPLE"}
    - 70
    - 71        FUNCTION_PARSERS = {
    - 72            **parser.Parser.FUNCTION_PARSERS,
    - 73            "QUANTILE": lambda self: self._parse_quantile(),
    - 74        }
    - 75
    - 76        FUNCTION_PARSERS.pop("MATCH")
    - 77
    - 78        NO_PAREN_FUNCTION_PARSERS = parser.Parser.NO_PAREN_FUNCTION_PARSERS.copy()
    - 79        NO_PAREN_FUNCTION_PARSERS.pop(TokenType.ANY)
    - 80
    - 81        RANGE_PARSERS = {
    - 82            **parser.Parser.RANGE_PARSERS,
    - 83            TokenType.GLOBAL: lambda self, this: self._match(TokenType.IN)
    - 84            and self._parse_in(this, is_global=True),
    - 85        }
    - 86
    - 87        # The PLACEHOLDER entry is popped because 1) it doesn't affect Clickhouse (it corresponds to
    - 88        # the postgres-specific JSONBContains parser) and 2) it makes parsing the ternary op simpler.
    - 89        COLUMN_OPERATORS = parser.Parser.COLUMN_OPERATORS.copy()
    - 90        COLUMN_OPERATORS.pop(TokenType.PLACEHOLDER)
    - 91
    - 92        JOIN_KINDS = {
    - 93            *parser.Parser.JOIN_KINDS,
    - 94            TokenType.ANY,
    - 95            TokenType.ASOF,
    - 96            TokenType.ANTI,
    - 97            TokenType.SEMI,
    - 98        }
    - 99
    -100        TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {
    -101            TokenType.ANY,
    -102            TokenType.SEMI,
    -103            TokenType.ANTI,
    -104            TokenType.SETTINGS,
    -105            TokenType.FORMAT,
    -106        }
    -107
    -108        LOG_DEFAULTS_TO_LN = True
    -109
    -110        QUERY_MODIFIER_PARSERS = {
    -111            **parser.Parser.QUERY_MODIFIER_PARSERS,
    -112            "settings": lambda self: self._parse_csv(self._parse_conjunction)
    -113            if self._match(TokenType.SETTINGS)
    -114            else None,
    -115            "format": lambda self: self._parse_id_var() if self._match(TokenType.FORMAT) else None,
    -116        }
    -117
    -118        def _parse_conjunction(self) -> t.Optional[exp.Expression]:
    -119            this = super()._parse_conjunction()
    -120
    -121            if self._match(TokenType.PLACEHOLDER):
    -122                return self.expression(
    -123                    exp.If,
    -124                    this=this,
    -125                    true=self._parse_conjunction(),
    -126                    false=self._match(TokenType.COLON) and self._parse_conjunction(),
    -127                )
    -128
    -129            return this
    -130
    -131        def _parse_placeholder(self) -> t.Optional[exp.Expression]:
    -132            """
    -133            Parse a placeholder expression like SELECT {abc: UInt32} or FROM {table: Identifier}
    -134            https://clickhouse.com/docs/en/sql-reference/syntax#defining-and-using-query-parameters
    -135            """
    -136            if not self._match(TokenType.L_BRACE):
    -137                return None
    -138
    -139            this = self._parse_id_var()
    -140            self._match(TokenType.COLON)
    -141            kind = self._parse_types(check_func=False) or (
    -142                self._match_text_seq("IDENTIFIER") and "Identifier"
    -143            )
    -144
    -145            if not kind:
    -146                self.raise_error("Expecting a placeholder type or 'Identifier' for tables")
    -147            elif not self._match(TokenType.R_BRACE):
    -148                self.raise_error("Expecting }")
    -149
    -150            return self.expression(exp.Placeholder, this=this, kind=kind)
    -151
    -152        def _parse_in(self, this: t.Optional[exp.Expression], is_global: bool = False) -> exp.In:
    -153            this = super()._parse_in(this)
    -154            this.set("is_global", is_global)
    -155            return this
    -156
    -157        def _parse_table(
    -158            self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
    -159        ) -> t.Optional[exp.Expression]:
    -160            this = super()._parse_table(schema=schema, alias_tokens=alias_tokens)
    -161
    -162            if self._match(TokenType.FINAL):
    -163                this = self.expression(exp.Final, this=this)
    -164
    -165            return this
    -166
    -167        def _parse_position(self, haystack_first: bool = False) -> exp.Expression:
    -168            return super()._parse_position(haystack_first=True)
    -169
    -170        # https://clickhouse.com/docs/en/sql-reference/statements/select/with/
    -171        def _parse_cte(self) -> exp.Expression:
    -172            index = self._index
    -173            try:
    -174                # WITH <identifier> AS <subquery expression>
    -175                return super()._parse_cte()
    -176            except ParseError:
    -177                # WITH <expression> AS <identifier>
    -178                self._retreat(index)
    -179                statement = self._parse_statement()
    -180
    -181                if statement and isinstance(statement.this, exp.Alias):
    -182                    self.raise_error("Expected CTE to have alias")
    -183
    -184                return self.expression(exp.CTE, this=statement, alias=statement and statement.this)
    -185
    -186        def _parse_join_parts(
    -187            self,
    -188        ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
    -189            is_global = self._match(TokenType.GLOBAL) and self._prev
    -190            kind_pre = self._match_set(self.JOIN_KINDS, advance=False) and self._prev
    -191            if kind_pre:
    -192                kind = self._match_set(self.JOIN_KINDS) and self._prev
    -193                side = self._match_set(self.JOIN_SIDES) and self._prev
    -194                return is_global, side, kind
    -195            return (
    -196                is_global,
    -197                self._match_set(self.JOIN_SIDES) and self._prev,
    -198                self._match_set(self.JOIN_KINDS) and self._prev,
    -199            )
    -200
    -201        def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Expression]:
    -202            join = super()._parse_join(skip_join_token)
    +            
     61    class Parser(parser.Parser):
    + 62        FUNCTIONS = {
    + 63            **parser.Parser.FUNCTIONS,
    + 64            "ANY": exp.AnyValue.from_arg_list,
    + 65            "MAP": parse_var_map,
    + 66            "MATCH": exp.RegexpLike.from_arg_list,
    + 67            "UNIQ": exp.ApproxDistinct.from_arg_list,
    + 68        }
    + 69
    + 70        FUNCTIONS_WITH_ALIASED_ARGS = {*parser.Parser.FUNCTIONS_WITH_ALIASED_ARGS, "TUPLE"}
    + 71
    + 72        FUNCTION_PARSERS = {
    + 73            **parser.Parser.FUNCTION_PARSERS,
    + 74            "QUANTILE": lambda self: self._parse_quantile(),
    + 75        }
    + 76
    + 77        FUNCTION_PARSERS.pop("MATCH")
    + 78
    + 79        NO_PAREN_FUNCTION_PARSERS = parser.Parser.NO_PAREN_FUNCTION_PARSERS.copy()
    + 80        NO_PAREN_FUNCTION_PARSERS.pop(TokenType.ANY)
    + 81
    + 82        RANGE_PARSERS = {
    + 83            **parser.Parser.RANGE_PARSERS,
    + 84            TokenType.GLOBAL: lambda self, this: self._match(TokenType.IN)
    + 85            and self._parse_in(this, is_global=True),
    + 86        }
    + 87
    + 88        # The PLACEHOLDER entry is popped because 1) it doesn't affect Clickhouse (it corresponds to
    + 89        # the postgres-specific JSONBContains parser) and 2) it makes parsing the ternary op simpler.
    + 90        COLUMN_OPERATORS = parser.Parser.COLUMN_OPERATORS.copy()
    + 91        COLUMN_OPERATORS.pop(TokenType.PLACEHOLDER)
    + 92
    + 93        JOIN_KINDS = {
    + 94            *parser.Parser.JOIN_KINDS,
    + 95            TokenType.ANY,
    + 96            TokenType.ASOF,
    + 97            TokenType.ANTI,
    + 98            TokenType.SEMI,
    + 99        }
    +100
    +101        TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {
    +102            TokenType.ANY,
    +103            TokenType.SEMI,
    +104            TokenType.ANTI,
    +105            TokenType.SETTINGS,
    +106            TokenType.FORMAT,
    +107        }
    +108
    +109        LOG_DEFAULTS_TO_LN = True
    +110
    +111        QUERY_MODIFIER_PARSERS = {
    +112            **parser.Parser.QUERY_MODIFIER_PARSERS,
    +113            "settings": lambda self: self._parse_csv(self._parse_conjunction)
    +114            if self._match(TokenType.SETTINGS)
    +115            else None,
    +116            "format": lambda self: self._parse_id_var() if self._match(TokenType.FORMAT) else None,
    +117        }
    +118
    +119        def _parse_conjunction(self) -> t.Optional[exp.Expression]:
    +120            this = super()._parse_conjunction()
    +121
    +122            if self._match(TokenType.PLACEHOLDER):
    +123                return self.expression(
    +124                    exp.If,
    +125                    this=this,
    +126                    true=self._parse_conjunction(),
    +127                    false=self._match(TokenType.COLON) and self._parse_conjunction(),
    +128                )
    +129
    +130            return this
    +131
    +132        def _parse_placeholder(self) -> t.Optional[exp.Expression]:
    +133            """
    +134            Parse a placeholder expression like SELECT {abc: UInt32} or FROM {table: Identifier}
    +135            https://clickhouse.com/docs/en/sql-reference/syntax#defining-and-using-query-parameters
    +136            """
    +137            if not self._match(TokenType.L_BRACE):
    +138                return None
    +139
    +140            this = self._parse_id_var()
    +141            self._match(TokenType.COLON)
    +142            kind = self._parse_types(check_func=False) or (
    +143                self._match_text_seq("IDENTIFIER") and "Identifier"
    +144            )
    +145
    +146            if not kind:
    +147                self.raise_error("Expecting a placeholder type or 'Identifier' for tables")
    +148            elif not self._match(TokenType.R_BRACE):
    +149                self.raise_error("Expecting }")
    +150
    +151            return self.expression(exp.Placeholder, this=this, kind=kind)
    +152
    +153        def _parse_in(self, this: t.Optional[exp.Expression], is_global: bool = False) -> exp.In:
    +154            this = super()._parse_in(this)
    +155            this.set("is_global", is_global)
    +156            return this
    +157
    +158        def _parse_table(
    +159            self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
    +160        ) -> t.Optional[exp.Expression]:
    +161            this = super()._parse_table(schema=schema, alias_tokens=alias_tokens)
    +162
    +163            if self._match(TokenType.FINAL):
    +164                this = self.expression(exp.Final, this=this)
    +165
    +166            return this
    +167
    +168        def _parse_position(self, haystack_first: bool = False) -> exp.StrPosition:
    +169            return super()._parse_position(haystack_first=True)
    +170
    +171        # https://clickhouse.com/docs/en/sql-reference/statements/select/with/
    +172        def _parse_cte(self) -> exp.CTE:
    +173            index = self._index
    +174            try:
    +175                # WITH <identifier> AS <subquery expression>
    +176                return super()._parse_cte()
    +177            except ParseError:
    +178                # WITH <expression> AS <identifier>
    +179                self._retreat(index)
    +180                statement = self._parse_statement()
    +181
    +182                if statement and isinstance(statement.this, exp.Alias):
    +183                    self.raise_error("Expected CTE to have alias")
    +184
    +185                return self.expression(exp.CTE, this=statement, alias=statement and statement.this)
    +186
    +187        def _parse_join_parts(
    +188            self,
    +189        ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
    +190            is_global = self._match(TokenType.GLOBAL) and self._prev
    +191            kind_pre = self._match_set(self.JOIN_KINDS, advance=False) and self._prev
    +192
    +193            if kind_pre:
    +194                kind = self._match_set(self.JOIN_KINDS) and self._prev
    +195                side = self._match_set(self.JOIN_SIDES) and self._prev
    +196                return is_global, side, kind
    +197
    +198            return (
    +199                is_global,
    +200                self._match_set(self.JOIN_SIDES) and self._prev,
    +201                self._match_set(self.JOIN_KINDS) and self._prev,
    +202            )
     203
    -204            if join:
    -205                join.set("global", join.args.pop("method", None))
    -206            return join
    -207
    -208        def _parse_function(
    -209            self, functions: t.Optional[t.Dict[str, t.Callable]] = None, anonymous: bool = False
    -210        ) -> t.Optional[exp.Expression]:
    -211            func = super()._parse_function(functions, anonymous)
    -212
    -213            if isinstance(func, exp.Anonymous):
    -214                params = self._parse_func_params(func)
    -215
    -216                if params:
    -217                    return self.expression(
    -218                        exp.ParameterizedAgg,
    -219                        this=func.this,
    -220                        expressions=func.expressions,
    -221                        params=params,
    -222                    )
    +204        def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Join]:
    +205            join = super()._parse_join(skip_join_token)
    +206
    +207            if join:
    +208                join.set("global", join.args.pop("method", None))
    +209            return join
    +210
    +211        def _parse_function(
    +212            self,
    +213            functions: t.Optional[t.Dict[str, t.Callable]] = None,
    +214            anonymous: bool = False,
    +215            optional_parens: bool = True,
    +216        ) -> t.Optional[exp.Expression]:
    +217            func = super()._parse_function(
    +218                functions=functions, anonymous=anonymous, optional_parens=optional_parens
    +219            )
    +220
    +221            if isinstance(func, exp.Anonymous):
    +222                params = self._parse_func_params(func)
     223
    -224            return func
    -225
    -226        def _parse_func_params(
    -227            self, this: t.Optional[exp.Func] = None
    -228        ) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
    -229            if self._match_pair(TokenType.R_PAREN, TokenType.L_PAREN):
    -230                return self._parse_csv(self._parse_lambda)
    -231            if self._match(TokenType.L_PAREN):
    -232                params = self._parse_csv(self._parse_lambda)
    -233                self._match_r_paren(this)
    -234                return params
    -235            return None
    -236
    -237        def _parse_quantile(self) -> exp.Quantile:
    -238            this = self._parse_lambda()
    -239            params = self._parse_func_params()
    -240            if params:
    -241                return self.expression(exp.Quantile, this=params[0], quantile=this)
    -242            return self.expression(exp.Quantile, this=this, quantile=exp.Literal.number(0.5))
    -243
    -244        def _parse_wrapped_id_vars(
    -245            self, optional: bool = False
    -246        ) -> t.List[t.Optional[exp.Expression]]:
    -247            return super()._parse_wrapped_id_vars(optional=True)
    -248
    -249        def _parse_primary_key(
    -250            self, wrapped_optional: bool = False, in_props: bool = False
    -251        ) -> exp.Expression:
    -252            return super()._parse_primary_key(
    -253                wrapped_optional=wrapped_optional or in_props, in_props=in_props
    -254            )
    +224                if params:
    +225                    return self.expression(
    +226                        exp.ParameterizedAgg,
    +227                        this=func.this,
    +228                        expressions=func.expressions,
    +229                        params=params,
    +230                    )
    +231
    +232            return func
    +233
    +234        def _parse_func_params(
    +235            self, this: t.Optional[exp.Func] = None
    +236        ) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
    +237            if self._match_pair(TokenType.R_PAREN, TokenType.L_PAREN):
    +238                return self._parse_csv(self._parse_lambda)
    +239
    +240            if self._match(TokenType.L_PAREN):
    +241                params = self._parse_csv(self._parse_lambda)
    +242                self._match_r_paren(this)
    +243                return params
    +244
    +245            return None
    +246
    +247        def _parse_quantile(self) -> exp.Quantile:
    +248            this = self._parse_lambda()
    +249            params = self._parse_func_params()
    +250            if params:
    +251                return self.expression(exp.Quantile, this=params[0], quantile=this)
    +252            return self.expression(exp.Quantile, this=this, quantile=exp.Literal.number(0.5))
    +253
    +254        def _parse_wrapped_id_vars(
    +255            self, optional: bool = False
    +256        ) -> t.List[t.Optional[exp.Expression]]:
    +257            return super()._parse_wrapped_id_vars(optional=True)
    +258
    +259        def _parse_primary_key(
    +260            self, wrapped_optional: bool = False, in_props: bool = False
    +261        ) -> exp.PrimaryKeyColumnConstraint | exp.PrimaryKey:
    +262            return super()._parse_primary_key(
    +263                wrapped_optional=wrapped_optional or in_props, in_props=in_props
    +264            )
    +265
    +266        def _parse_on_property(self) -> t.Optional[exp.Expression]:
    +267            index = self._index
    +268            if self._match_text_seq("CLUSTER"):
    +269                this = self._parse_id_var()
    +270                if this:
    +271                    return self.expression(exp.OnCluster, this=this)
    +272                else:
    +273                    self._retreat(index)
    +274            return None
     
    -

    Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces -a parsed syntax tree.

    +

    Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

    Arguments:
      -
    • error_level: the desired error level. +
    • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
    • -
    • error_message_context: determines the amount of context to capture from a +
    • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). -Default: 50.
    • -
    • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. -Default: 0
    • -
    • alias_post_tablesample: If the table alias comes after tablesample. -Default: False
    • +Default: 100
    • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
    • -
    • null_ordering: Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
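Note: the parser options listed above are forwarded by the top-level parse/parse_one helpers, so they can be set per call. A small illustrative sketch (the failing SQL string is an arbitrary example):

    import sqlglot
    from sqlglot.errors import ErrorLevel, ParseError

    # With error_level=ErrorLevel.RAISE, parse failures surface as a ParseError
    # that aggregates up to max_errors messages.
    try:
        sqlglot.parse_one("SELECT (1", read="clickhouse", error_level=ErrorLevel.RAISE)
    except ParseError as err:
        print(err)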
    @@ -1085,128 +1228,154 @@ Default: "nulls_are_small"
    -
    256    class Generator(generator.Generator):
    -257        STRUCT_DELIMITER = ("(", ")")
    -258
    -259        TYPE_MAPPING = {
    -260            **generator.Generator.TYPE_MAPPING,
    -261            exp.DataType.Type.ARRAY: "Array",
    -262            exp.DataType.Type.BIGINT: "Int64",
    -263            exp.DataType.Type.DATETIME64: "DateTime64",
    -264            exp.DataType.Type.DOUBLE: "Float64",
    -265            exp.DataType.Type.FLOAT: "Float32",
    -266            exp.DataType.Type.INT: "Int32",
    -267            exp.DataType.Type.INT128: "Int128",
    -268            exp.DataType.Type.INT256: "Int256",
    -269            exp.DataType.Type.MAP: "Map",
    -270            exp.DataType.Type.NULLABLE: "Nullable",
    -271            exp.DataType.Type.SMALLINT: "Int16",
    -272            exp.DataType.Type.STRUCT: "Tuple",
    -273            exp.DataType.Type.TINYINT: "Int8",
    -274            exp.DataType.Type.UBIGINT: "UInt64",
    -275            exp.DataType.Type.UINT: "UInt32",
    -276            exp.DataType.Type.UINT128: "UInt128",
    -277            exp.DataType.Type.UINT256: "UInt256",
    -278            exp.DataType.Type.USMALLINT: "UInt16",
    -279            exp.DataType.Type.UTINYINT: "UInt8",
    -280        }
    -281
    -282        TRANSFORMS = {
    -283            **generator.Generator.TRANSFORMS,
    -284            exp.AnyValue: rename_func("any"),
    -285            exp.ApproxDistinct: rename_func("uniq"),
    -286            exp.Array: inline_array_sql,
    -287            exp.CastToStrType: rename_func("CAST"),
    -288            exp.Final: lambda self, e: f"{self.sql(e, 'this')} FINAL",
    -289            exp.Map: lambda self, e: _lower_func(var_map_sql(self, e)),
    -290            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    -291            exp.Pivot: no_pivot_sql,
    -292            exp.Quantile: lambda self, e: self.func("quantile", e.args.get("quantile"))
    -293            + f"({self.sql(e, 'this')})",
    -294            exp.RegexpLike: lambda self, e: f"match({self.format_args(e.this, e.expression)})",
    -295            exp.StrPosition: lambda self, e: f"position({self.format_args(e.this, e.args.get('substr'), e.args.get('position'))})",
    -296            exp.VarMap: lambda self, e: _lower_func(var_map_sql(self, e)),
    -297        }
    -298
    -299        PROPERTIES_LOCATION = {
    -300            **generator.Generator.PROPERTIES_LOCATION,
    -301            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -302            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    -303        }
    -304
    -305        JOIN_HINTS = False
    -306        TABLE_HINTS = False
    -307        EXPLICIT_UNION = True
    -308        GROUPINGS_SEP = ""
    -309
    -310        def cte_sql(self, expression: exp.CTE) -> str:
    -311            if isinstance(expression.this, exp.Alias):
    -312                return self.sql(expression, "this")
    -313
    -314            return super().cte_sql(expression)
    -315
    -316        def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
    -317            return super().after_limit_modifiers(expression) + [
    -318                self.seg("SETTINGS ") + self.expressions(expression, key="settings", flat=True)
    -319                if expression.args.get("settings")
    -320                else "",
    -321                self.seg("FORMAT ") + self.sql(expression, "format")
    -322                if expression.args.get("format")
    -323                else "",
    -324            ]
    +            
    276    class Generator(generator.Generator):
    +277        STRUCT_DELIMITER = ("(", ")")
    +278
    +279        TYPE_MAPPING = {
    +280            **generator.Generator.TYPE_MAPPING,
    +281            exp.DataType.Type.ARRAY: "Array",
    +282            exp.DataType.Type.BIGINT: "Int64",
    +283            exp.DataType.Type.DATETIME64: "DateTime64",
    +284            exp.DataType.Type.DOUBLE: "Float64",
    +285            exp.DataType.Type.FLOAT: "Float32",
    +286            exp.DataType.Type.INT: "Int32",
    +287            exp.DataType.Type.INT128: "Int128",
    +288            exp.DataType.Type.INT256: "Int256",
    +289            exp.DataType.Type.MAP: "Map",
    +290            exp.DataType.Type.NULLABLE: "Nullable",
    +291            exp.DataType.Type.SMALLINT: "Int16",
    +292            exp.DataType.Type.STRUCT: "Tuple",
    +293            exp.DataType.Type.TINYINT: "Int8",
    +294            exp.DataType.Type.UBIGINT: "UInt64",
    +295            exp.DataType.Type.UINT: "UInt32",
    +296            exp.DataType.Type.UINT128: "UInt128",
    +297            exp.DataType.Type.UINT256: "UInt256",
    +298            exp.DataType.Type.USMALLINT: "UInt16",
    +299            exp.DataType.Type.UTINYINT: "UInt8",
    +300        }
    +301
    +302        TRANSFORMS = {
    +303            **generator.Generator.TRANSFORMS,
    +304            exp.AnyValue: rename_func("any"),
    +305            exp.ApproxDistinct: rename_func("uniq"),
    +306            exp.Array: inline_array_sql,
    +307            exp.CastToStrType: rename_func("CAST"),
    +308            exp.Final: lambda self, e: f"{self.sql(e, 'this')} FINAL",
    +309            exp.Map: lambda self, e: _lower_func(var_map_sql(self, e)),
    +310            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    +311            exp.Pivot: no_pivot_sql,
    +312            exp.Quantile: lambda self, e: self.func("quantile", e.args.get("quantile"))
    +313            + f"({self.sql(e, 'this')})",
    +314            exp.RegexpLike: lambda self, e: f"match({self.format_args(e.this, e.expression)})",
    +315            exp.StrPosition: lambda self, e: f"position({self.format_args(e.this, e.args.get('substr'), e.args.get('position'))})",
    +316            exp.VarMap: lambda self, e: _lower_func(var_map_sql(self, e)),
    +317        }
    +318
    +319        PROPERTIES_LOCATION = {
    +320            **generator.Generator.PROPERTIES_LOCATION,
    +321            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +322            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    +323            exp.OnCluster: exp.Properties.Location.POST_NAME,
    +324        }
     325
    -326        def parameterizedagg_sql(self, expression: exp.Anonymous) -> str:
    -327            params = self.expressions(expression, "params", flat=True)
    -328            return self.func(expression.name, *expression.expressions) + f"({params})"
    -329
    -330        def placeholder_sql(self, expression: exp.Placeholder) -> str:
    -331            return f"{{{expression.name}: {self.sql(expression, 'kind')}}}"
    +326        JOIN_HINTS = False
    +327        TABLE_HINTS = False
    +328        EXPLICIT_UNION = True
    +329        GROUPINGS_SEP = ""
    +330
    +331        # there's no list in docs, but it can be found in Clickhouse code
    +332        # see `ClickHouse/src/Parsers/ParserCreate*.cpp`
    +333        ON_CLUSTER_TARGETS = {
    +334            "DATABASE",
    +335            "TABLE",
    +336            "VIEW",
    +337            "DICTIONARY",
    +338            "INDEX",
    +339            "FUNCTION",
    +340            "NAMED COLLECTION",
    +341        }
    +342
    +343        def safeconcat_sql(self, expression: exp.SafeConcat) -> str:
    +344            # Clickhouse errors out if we try to cast a NULL value to TEXT
    +345            return self.func(
    +346                "CONCAT",
    +347                *[
    +348                    exp.func("if", e.is_(exp.null()), e, exp.cast(e, "text"))
    +349                    for e in expression.expressions
    +350                ],
    +351            )
    +352
    +353        def cte_sql(self, expression: exp.CTE) -> str:
    +354            if isinstance(expression.this, exp.Alias):
    +355                return self.sql(expression, "this")
    +356
    +357            return super().cte_sql(expression)
    +358
    +359        def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
    +360            return super().after_limit_modifiers(expression) + [
    +361                self.seg("SETTINGS ") + self.expressions(expression, key="settings", flat=True)
    +362                if expression.args.get("settings")
    +363                else "",
    +364                self.seg("FORMAT ") + self.sql(expression, "format")
    +365                if expression.args.get("format")
    +366                else "",
    +367            ]
    +368
    +369        def parameterizedagg_sql(self, expression: exp.Anonymous) -> str:
    +370            params = self.expressions(expression, "params", flat=True)
    +371            return self.func(expression.name, *expression.expressions) + f"({params})"
    +372
    +373        def placeholder_sql(self, expression: exp.Placeholder) -> str:
    +374            return f"{{{expression.name}: {self.sql(expression, 'kind')}}}"
    +375
    +376        def oncluster_sql(self, expression: exp.OnCluster) -> str:
    +377            return f"ON CLUSTER {self.sql(expression, 'this')}"
    +378
    +379        def createable_sql(
    +380            self,
    +381            expression: exp.Create,
    +382            locations: dict[exp.Properties.Location, list[exp.Property]],
    +383        ) -> str:
    +384            kind = self.sql(expression, "kind").upper()
    +385            if kind in self.ON_CLUSTER_TARGETS and locations.get(exp.Properties.Location.POST_NAME):
    +386                this_name = self.sql(expression.this, "this")
    +387                this_properties = " ".join(
    +388                    [self.sql(prop) for prop in locations[exp.Properties.Location.POST_NAME]]
    +389                )
    +390                this_schema = self.schema_columns_sql(expression.this)
    +391                return f"{this_name}{self.sep()}{this_properties}{self.sep()}{this_schema}"
    +392            return super().createable_sql(expression, locations)
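Note: taken together, _parse_on_property on the parser side and the POST_NAME property location plus createable_sql on the generator side let ON CLUSTER clauses survive a round trip. A hedged sketch; the exact whitespace of the output depends on generator settings:

    import sqlglot

    ddl = "CREATE TABLE events ON CLUSTER my_cluster (id UInt64, ts DateTime64)"

    # ON CLUSTER is captured as exp.OnCluster and re-emitted by createable_sql
    # between the table name and the column schema.
    print(sqlglot.transpile(ddl, read="clickhouse", write="clickhouse")[0])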
     
    -

    Generator interprets the given syntax tree and produces a SQL string as an output.

    +

    Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
      -
    • time_mapping (dict): the dictionary of custom time mappings in which the key -represents a python time format and the output the target time format
    • -
    • time_trie (trie): a trie of the time_mapping keys
    • -
    • pretty (bool): if set to True the returned string will be formatted. Default: False.
    • -
    • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    • -
    • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    • -
    • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    • -
    • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    • -
    • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    • -
    • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    • -
    • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    • -
    • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    • -
    • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    • -
    • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    • -
    • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    • -
    • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    • -
    • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    • -
    • normalize (bool): if set to True all identifiers will lower cased
    • -
    • string_escape (str): specifies a string escape character. Default: '.
    • -
    • identifier_escape (str): specifies an identifier escape character. Default: ".
    • -
    • pad (int): determines padding in a formatted string. Default: 2.
    • -
    • indent (int): determines the size of indentation in a formatted string. Default: 4.
    • -
    • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    • -
    • normalize_functions (str): normalize function names, "upper", "lower", or None -Default: "upper"
    • -
    • alias_post_tablesample (bool): if the table alias comes after tablesample -Default: False
    • -
    • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit -Default: False
    • -
    • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters -unsupported expressions. Default ErrorLevel.WARN.
    • -
    • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
    • -
    • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +
    • pretty: Whether or not to format the produced SQL string. +Default: False.
    • +
    • identify: Determines when an identifier should be quoted. Possible values are: +False (default): Never quote, except in cases where it's mandatory by the dialect. +True or 'always': Always quote. +'safe': Only quote identifiers that are case insensitive.
    • +
    • normalize: Whether or not to normalize identifiers to lowercase. +Default: False.
    • +
    • pad: Determines the pad size in a formatted string. +Default: 2.
    • +
    • indent: Determines the indentation size in a formatted string. +Default: 2.
    • +
    • normalize_functions: Whether or not to normalize all function names. Possible values are: +"upper" or True (default): Convert names to uppercase. +"lower": Convert names to lowercase. +False: Disables function name normalization.
    • +
    • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. +Default ErrorLevel.WARN.
    • +
    • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    • -
    • leading_comma (bool): if the the comma is leading or trailing in select statements +
    • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. +This is only relevant when generating in pretty mode. Default: False
• max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
@@ -1218,6 +1387,32 @@ Default: True
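Note: the generator options listed above are likewise plain keyword arguments on the top-level helpers (transpile, Expression.sql) and end up on the Generator documented here. A sketch; the rendered output is illustrative only:

    import sqlglot

    # pretty/identify/normalize_functions are forwarded to the generator.
    sql = "select max(x) as m, y from t group by y"
    print(sqlglot.transpile(sql, write="clickhouse", pretty=True, identify=True)[0])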
    +
+        def safeconcat_sql(self, expression: sqlglot.expressions.SafeConcat) -> str:
    343        def safeconcat_sql(self, expression: exp.SafeConcat) -> str:
    +344            # Clickhouse errors out if we try to cast a NULL value to TEXT
    +345            return self.func(
    +346                "CONCAT",
    +347                *[
    +348                    exp.func("if", e.is_(exp.null()), e, exp.cast(e, "text"))
    +349                    for e in expression.expressions
    +350                ],
    +351            )
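Note: as a concrete illustration of the NULL guard above (a sketch; the rendered SQL text is not guaranteed to be stable across versions):

    from sqlglot import exp
    from sqlglot.dialects.clickhouse import ClickHouse

    # Build a SafeConcat node directly and render it with the ClickHouse generator:
    # each argument is wrapped so NULLs pass through unchanged and everything else
    # is cast to a string type before CONCAT sees it.
    node = exp.SafeConcat(expressions=[exp.column("a"), exp.column("b")])
    print(ClickHouse().generate(node))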
    +
    @@ -1229,11 +1424,11 @@ Default: True
    -
    310        def cte_sql(self, expression: exp.CTE) -> str:
    -311            if isinstance(expression.this, exp.Alias):
    -312                return self.sql(expression, "this")
    -313
    -314            return super().cte_sql(expression)
    +            
    353        def cte_sql(self, expression: exp.CTE) -> str:
    +354            if isinstance(expression.this, exp.Alias):
    +355                return self.sql(expression, "this")
    +356
    +357            return super().cte_sql(expression)
     
    @@ -1251,15 +1446,15 @@ Default: True
    -
    316        def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
    -317            return super().after_limit_modifiers(expression) + [
    -318                self.seg("SETTINGS ") + self.expressions(expression, key="settings", flat=True)
    -319                if expression.args.get("settings")
    -320                else "",
    -321                self.seg("FORMAT ") + self.sql(expression, "format")
    -322                if expression.args.get("format")
    -323                else "",
    -324            ]
    +            
    359        def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
    +360            return super().after_limit_modifiers(expression) + [
    +361                self.seg("SETTINGS ") + self.expressions(expression, key="settings", flat=True)
    +362                if expression.args.get("settings")
    +363                else "",
    +364                self.seg("FORMAT ") + self.sql(expression, "format")
    +365                if expression.args.get("format")
    +366                else "",
    +367            ]
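Note: after_limit_modifiers appends ClickHouse's SETTINGS and FORMAT clauses after the standard limit modifiers. Assuming the ClickHouse parser captures these clauses into the settings/format args (as the generator side here implies), a round trip looks like:

    import sqlglot

    q = "SELECT * FROM events LIMIT 10 SETTINGS max_threads = 4 FORMAT JSONEachRow"
    print(sqlglot.transpile(q, read="clickhouse", write="clickhouse")[0])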
     
    @@ -1277,9 +1472,9 @@ Default: True
    -
    326        def parameterizedagg_sql(self, expression: exp.Anonymous) -> str:
    -327            params = self.expressions(expression, "params", flat=True)
    -328            return self.func(expression.name, *expression.expressions) + f"({params})"
    +            
    369        def parameterizedagg_sql(self, expression: exp.Anonymous) -> str:
    +370            params = self.expressions(expression, "params", flat=True)
    +371            return self.func(expression.name, *expression.expressions) + f"({params})"
     
    @@ -1297,8 +1492,58 @@ Default: True
    -
    330        def placeholder_sql(self, expression: exp.Placeholder) -> str:
    -331            return f"{{{expression.name}: {self.sql(expression, 'kind')}}}"
    +            
    373        def placeholder_sql(self, expression: exp.Placeholder) -> str:
    +374            return f"{{{expression.name}: {self.sql(expression, 'kind')}}}"
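Note: the placeholder format above corresponds to ClickHouse query parameters. Assuming the tokenizer/parser accept the {name: Type} syntax (which this generator method implies), a round trip looks like:

    import sqlglot

    # {user_id: UInt64} is a ClickHouse query parameter; placeholder_sql re-emits
    # the captured name and type in the same {name: Type} form.
    q = sqlglot.parse_one("SELECT {user_id: UInt64} AS id", read="clickhouse")
    print(q.sql(dialect="clickhouse"))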
    +
+
+        def oncluster_sql(self, expression: sqlglot.expressions.OnCluster) -> str:
    376        def oncluster_sql(self, expression: exp.OnCluster) -> str:
    +377            return f"ON CLUSTER {self.sql(expression, 'this')}"
    +
+
+        def createable_sql(self, expression: sqlglot.expressions.Create, locations: dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]) -> str:
    379        def createable_sql(
    +380            self,
    +381            expression: exp.Create,
    +382            locations: dict[exp.Properties.Location, list[exp.Property]],
    +383        ) -> str:
    +384            kind = self.sql(expression, "kind").upper()
    +385            if kind in self.ON_CLUSTER_TARGETS and locations.get(exp.Properties.Location.POST_NAME):
    +386                this_name = self.sql(expression.this, "this")
    +387                this_properties = " ".join(
    +388                    [self.sql(prop) for prop in locations[exp.Properties.Location.POST_NAME]]
    +389                )
    +390                this_schema = self.schema_columns_sql(expression.this)
    +391                return f"{this_name}{self.sep()}{this_properties}{self.sep()}{this_schema}"
    +392            return super().createable_sql(expression, locations)
     
    @@ -1415,9 +1660,11 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_having_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -1441,7 +1688,6 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -1492,6 +1738,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
diff --git a/docs/sqlglot/dialects/databricks.html b/docs/sqlglot/dialects/databricks.html
index c97eb16..3cd45cf 100644
--- a/docs/sqlglot/dialects/databricks.html
+++ b/docs/sqlglot/dialects/databricks.html
@@ -232,27 +232,19 @@
    -

    Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces -a parsed syntax tree.

    +

    Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

    Arguments:
      -
    • error_level: the desired error level. +
    • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
    • -
    • error_message_context: determines the amount of context to capture from a +
    • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). -Default: 50.
    • -
    • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. -Default: 0
    • -
    • alias_post_tablesample: If the table alias comes after tablesample. -Default: False
    • +Default: 100
    • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
    • -
    • null_ordering: Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
    @@ -304,49 +296,34 @@ Default: "nulls_are_small"
    -

    Generator interprets the given syntax tree and produces a SQL string as an output.

    +

    Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
      -
    • time_mapping (dict): the dictionary of custom time mappings in which the key -represents a python time format and the output the target time format
    • -
    • time_trie (trie): a trie of the time_mapping keys
    • -
    • pretty (bool): if set to True the returned string will be formatted. Default: False.
    • -
    • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    • -
    • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    • -
    • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    • -
    • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    • -
    • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    • -
    • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    • -
    • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    • -
    • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    • -
    • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    • -
    • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    • -
    • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    • -
    • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    • -
    • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    • -
    • normalize (bool): if set to True all identifiers will lower cased
    • -
    • string_escape (str): specifies a string escape character. Default: '.
    • -
    • identifier_escape (str): specifies an identifier escape character. Default: ".
    • -
    • pad (int): determines padding in a formatted string. Default: 2.
    • -
    • indent (int): determines the size of indentation in a formatted string. Default: 4.
    • -
    • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    • -
    • normalize_functions (str): normalize function names, "upper", "lower", or None -Default: "upper"
    • -
    • alias_post_tablesample (bool): if the table alias comes after tablesample -Default: False
    • -
    • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit -Default: False
    • -
    • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters -unsupported expressions. Default ErrorLevel.WARN.
    • -
    • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
    • -
    • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +
    • pretty: Whether or not to format the produced SQL string. +Default: False.
    • +
    • identify: Determines when an identifier should be quoted. Possible values are: +False (default): Never quote, except in cases where it's mandatory by the dialect. +True or 'always': Always quote. +'safe': Only quote identifiers that are case insensitive.
    • +
    • normalize: Whether or not to normalize identifiers to lowercase. +Default: False.
    • +
    • pad: Determines the pad size in a formatted string. +Default: 2.
    • +
    • indent: Determines the indentation size in a formatted string. +Default: 2.
    • +
    • normalize_functions: Whether or not to normalize all function names. Possible values are: +"upper" or True (default): Convert names to uppercase. +"lower": Convert names to lowercase. +False: Disables function name normalization.
    • +
    • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. +Default ErrorLevel.WARN.
    • +
    • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    • -
    • leading_comma (bool): if the the comma is leading or trailing in select statements +
    • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. +This is only relevant when generating in pretty mode. Default: False
• max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
@@ -386,6 +363,7 @@ Default: True
    • notnullcolumnconstraint_sql
      primarykeycolumnconstraint_sql
      uniquecolumnconstraint_sql
      +
      createable_sql
      create_sql
      clone_sql
      describe_sql
      @@ -466,9 +444,11 @@ Default: True
      ordered_sql
      matchrecognize_sql
      query_modifiers
      +
      offset_limit_modifiers
      after_limit_modifiers
      select_sql
      schema_sql
      +
      schema_columns_sql
      star_sql
      parameter_sql
      sessionparameter_sql
      @@ -493,7 +473,7 @@ Default: True
      nextvaluefor_sql
      extract_sql
      trim_sql
      -
      concat_sql
      +
      safeconcat_sql
      check_sql
      foreignkey_sql
      primarykey_sql
      @@ -543,6 +523,7 @@ Default: True
      respectnulls_sql
      intdiv_sql
      dpipe_sql
      +
      safedpipe_sql
      div_sql
      overlaps_sql
      distance_sql
      @@ -591,6 +572,7 @@ Default: True
      dictproperty_sql
      dictrange_sql
      dictsubproperty_sql
      +
      oncluster_sql
    sqlglot.dialects.spark.Spark.Generator
@@ -641,6 +623,7 @@ Default: True
diff --git a/docs/sqlglot/dialects/dialect.html b/docs/sqlglot/dialects/dialect.html
index 2c86235..e8e002d 100644
--- a/docs/sqlglot/dialects/dialect.html
+++ b/docs/sqlglot/dialects/dialect.html
@@ -42,6 +42,12 @@
  • CLICKHOUSE
  • +
  • + DATABRICKS +
  • +
  • + DRILL +
  • DUCKDB
  • @@ -82,19 +88,13 @@ TABLEAU
  • - TRINO -
  • -
  • - TSQL -
  • -
  • - DATABRICKS + TERADATA
  • - DRILL + TRINO
  • - TERADATA + TSQL
  • @@ -240,6 +240,9 @@
  • ts_or_ds_to_date_sql
  • +
  • + concat_to_dpipe_sql +
  • pivot_column_names
  • @@ -293,24 +296,24 @@ 25 26 BIGQUERY = "bigquery" 27 CLICKHOUSE = "clickhouse" - 28 DUCKDB = "duckdb" - 29 HIVE = "hive" - 30 MYSQL = "mysql" - 31 ORACLE = "oracle" - 32 POSTGRES = "postgres" - 33 PRESTO = "presto" - 34 REDSHIFT = "redshift" - 35 SNOWFLAKE = "snowflake" - 36 SPARK = "spark" - 37 SPARK2 = "spark2" - 38 SQLITE = "sqlite" - 39 STARROCKS = "starrocks" - 40 TABLEAU = "tableau" - 41 TRINO = "trino" - 42 TSQL = "tsql" - 43 DATABRICKS = "databricks" - 44 DRILL = "drill" - 45 TERADATA = "teradata" + 28 DATABRICKS = "databricks" + 29 DRILL = "drill" + 30 DUCKDB = "duckdb" + 31 HIVE = "hive" + 32 MYSQL = "mysql" + 33 ORACLE = "oracle" + 34 POSTGRES = "postgres" + 35 PRESTO = "presto" + 36 REDSHIFT = "redshift" + 37 SNOWFLAKE = "snowflake" + 38 SPARK = "spark" + 39 SPARK2 = "spark2" + 40 SQLITE = "sqlite" + 41 STARROCKS = "starrocks" + 42 TABLEAU = "tableau" + 43 TERADATA = "teradata" + 44 TRINO = "trino" + 45 TSQL = "tsql" 46 47 48class _Dialect(type): @@ -344,504 +347,505 @@ 76 enum = Dialects.__members__.get(clsname.upper()) 77 cls.classes[enum.value if enum is not None else clsname.lower()] = klass 78 - 79 klass.time_trie = new_trie(klass.time_mapping) - 80 klass.inverse_time_mapping = {v: k for k, v in klass.time_mapping.items()} - 81 klass.inverse_time_trie = new_trie(klass.inverse_time_mapping) - 82 - 83 klass.tokenizer_class = getattr(klass, "Tokenizer", Tokenizer) - 84 klass.parser_class = getattr(klass, "Parser", Parser) - 85 klass.generator_class = getattr(klass, "Generator", Generator) - 86 - 87 klass.quote_start, klass.quote_end = list(klass.tokenizer_class._QUOTES.items())[0] - 88 klass.identifier_start, klass.identifier_end = list( - 89 klass.tokenizer_class._IDENTIFIERS.items() - 90 )[0] - 91 - 92 def get_start_end(token_type: TokenType) -> t.Tuple[t.Optional[str], t.Optional[str]]: - 93 return next( - 94 ( - 95 (s, e) - 96 for s, (e, t) in klass.tokenizer_class._FORMAT_STRINGS.items() - 97 if t == token_type - 98 ), - 99 (None, None), -100 ) -101 -102 klass.bit_start, klass.bit_end = get_start_end(TokenType.BIT_STRING) -103 klass.hex_start, klass.hex_end = get_start_end(TokenType.HEX_STRING) -104 klass.byte_start, klass.byte_end = get_start_end(TokenType.BYTE_STRING) -105 klass.raw_start, klass.raw_end = get_start_end(TokenType.RAW_STRING) -106 -107 klass.tokenizer_class.identifiers_can_start_with_digit = ( -108 klass.identifiers_can_start_with_digit -109 ) -110 -111 return klass -112 -113 -114class Dialect(metaclass=_Dialect): -115 index_offset = 0 -116 unnest_column_only = False -117 alias_post_tablesample = False -118 identifiers_can_start_with_digit = False -119 normalize_functions: t.Optional[str] = "upper" -120 null_ordering = "nulls_are_small" -121 -122 date_format = "'%Y-%m-%d'" -123 dateint_format = "'%Y%m%d'" -124 time_format = "'%Y-%m-%d %H:%M:%S'" -125 time_mapping: t.Dict[str, str] = {} -126 -127 # autofilled -128 quote_start = None -129 quote_end = None -130 identifier_start = None -131 identifier_end = None -132 -133 time_trie = None -134 inverse_time_mapping = None -135 inverse_time_trie = None -136 tokenizer_class = None -137 parser_class = None -138 generator_class = None -139 -140 def __eq__(self, other: t.Any) -> bool: -141 return type(self) == other -142 -143 def __hash__(self) -> int: -144 return hash(type(self)) -145 -146 @classmethod -147 def get_or_raise(cls, dialect: DialectType) -> t.Type[Dialect]: -148 if not dialect: -149 return cls -150 if isinstance(dialect, _Dialect): -151 return dialect -152 if isinstance(dialect, Dialect): -153 
return dialect.__class__ + 79 klass.TIME_TRIE = new_trie(klass.TIME_MAPPING) + 80 klass.FORMAT_TRIE = ( + 81 new_trie(klass.FORMAT_MAPPING) if klass.FORMAT_MAPPING else klass.TIME_TRIE + 82 ) + 83 klass.INVERSE_TIME_MAPPING = {v: k for k, v in klass.TIME_MAPPING.items()} + 84 klass.INVERSE_TIME_TRIE = new_trie(klass.INVERSE_TIME_MAPPING) + 85 + 86 klass.tokenizer_class = getattr(klass, "Tokenizer", Tokenizer) + 87 klass.parser_class = getattr(klass, "Parser", Parser) + 88 klass.generator_class = getattr(klass, "Generator", Generator) + 89 + 90 klass.QUOTE_START, klass.QUOTE_END = list(klass.tokenizer_class._QUOTES.items())[0] + 91 klass.IDENTIFIER_START, klass.IDENTIFIER_END = list( + 92 klass.tokenizer_class._IDENTIFIERS.items() + 93 )[0] + 94 + 95 def get_start_end(token_type: TokenType) -> t.Tuple[t.Optional[str], t.Optional[str]]: + 96 return next( + 97 ( + 98 (s, e) + 99 for s, (e, t) in klass.tokenizer_class._FORMAT_STRINGS.items() +100 if t == token_type +101 ), +102 (None, None), +103 ) +104 +105 klass.BIT_START, klass.BIT_END = get_start_end(TokenType.BIT_STRING) +106 klass.HEX_START, klass.HEX_END = get_start_end(TokenType.HEX_STRING) +107 klass.BYTE_START, klass.BYTE_END = get_start_end(TokenType.BYTE_STRING) +108 klass.RAW_START, klass.RAW_END = get_start_end(TokenType.RAW_STRING) +109 +110 dialect_properties = { +111 **{ +112 k: v +113 for k, v in vars(klass).items() +114 if not callable(v) and not isinstance(v, classmethod) and not k.startswith("__") +115 }, +116 "STRING_ESCAPE": klass.tokenizer_class.STRING_ESCAPES[0], +117 "IDENTIFIER_ESCAPE": klass.tokenizer_class.IDENTIFIER_ESCAPES[0], +118 } +119 +120 # Pass required dialect properties to the tokenizer, parser and generator classes +121 for subclass in (klass.tokenizer_class, klass.parser_class, klass.generator_class): +122 for name, value in dialect_properties.items(): +123 if hasattr(subclass, name): +124 setattr(subclass, name, value) +125 +126 if not klass.STRICT_STRING_CONCAT: +127 klass.parser_class.BITWISE[TokenType.DPIPE] = exp.SafeDPipe +128 +129 return klass +130 +131 +132class Dialect(metaclass=_Dialect): +133 # Determines the base index offset for arrays +134 INDEX_OFFSET = 0 +135 +136 # If true unnest table aliases are considered only as column aliases +137 UNNEST_COLUMN_ONLY = False +138 +139 # Determines whether or not the table alias comes after tablesample +140 ALIAS_POST_TABLESAMPLE = False +141 +142 # Determines whether or not an unquoted identifier can start with a digit +143 IDENTIFIERS_CAN_START_WITH_DIGIT = False +144 +145 # Determines whether or not CONCAT's arguments must be strings +146 STRICT_STRING_CONCAT = False +147 +148 # Determines how function names are going to be normalized +149 NORMALIZE_FUNCTIONS: bool | str = "upper" +150 +151 # Indicates the default null ordering method to use if not explicitly set +152 # Options are: "nulls_are_small", "nulls_are_large", "nulls_are_last" +153 NULL_ORDERING = "nulls_are_small" 154 -155 result = cls.get(dialect) -156 if not result: -157 raise ValueError(f"Unknown dialect '{dialect}'") +155 DATE_FORMAT = "'%Y-%m-%d'" +156 DATEINT_FORMAT = "'%Y%m%d'" +157 TIME_FORMAT = "'%Y-%m-%d %H:%M:%S'" 158 -159 return result -160 -161 @classmethod -162 def format_time( -163 cls, expression: t.Optional[str | exp.Expression] -164 ) -> t.Optional[exp.Expression]: -165 if isinstance(expression, str): -166 return exp.Literal.string( -167 format_time( -168 expression[1:-1], # the time formats are quoted -169 cls.time_mapping, -170 cls.time_trie, -171 ) -172 ) -173 if 
expression and expression.is_string: -174 return exp.Literal.string( -175 format_time( -176 expression.this, -177 cls.time_mapping, -178 cls.time_trie, -179 ) -180 ) -181 return expression +159 # Custom time mappings in which the key represents dialect time format +160 # and the value represents a python time format +161 TIME_MAPPING: t.Dict[str, str] = {} +162 +163 # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_model_rules_date_time +164 # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Data-Type-Conversions/Character-to-DATE-Conversion/Forcing-a-FORMAT-on-CAST-for-Converting-Character-to-DATE +165 # special syntax cast(x as date format 'yyyy') defaults to time_mapping +166 FORMAT_MAPPING: t.Dict[str, str] = {} +167 +168 # Autofilled +169 tokenizer_class = Tokenizer +170 parser_class = Parser +171 generator_class = Generator +172 +173 # A trie of the time_mapping keys +174 TIME_TRIE: t.Dict = {} +175 FORMAT_TRIE: t.Dict = {} +176 +177 INVERSE_TIME_MAPPING: t.Dict[str, str] = {} +178 INVERSE_TIME_TRIE: t.Dict = {} +179 +180 def __eq__(self, other: t.Any) -> bool: +181 return type(self) == other 182 -183 def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]: -184 return self.parser(**opts).parse(self.tokenize(sql), sql) +183 def __hash__(self) -> int: +184 return hash(type(self)) 185 -186 def parse_into( -187 self, expression_type: exp.IntoType, sql: str, **opts -188 ) -> t.List[t.Optional[exp.Expression]]: -189 return self.parser(**opts).parse_into(expression_type, self.tokenize(sql), sql) -190 -191 def generate(self, expression: t.Optional[exp.Expression], **opts) -> str: -192 return self.generator(**opts).generate(expression) -193 -194 def transpile(self, sql: str, **opts) -> t.List[str]: -195 return [self.generate(expression, **opts) for expression in self.parse(sql)] -196 -197 def tokenize(self, sql: str) -> t.List[Token]: -198 return self.tokenizer.tokenize(sql) -199 -200 @property -201 def tokenizer(self) -> Tokenizer: -202 if not hasattr(self, "_tokenizer"): -203 self._tokenizer = self.tokenizer_class() # type: ignore -204 return self._tokenizer -205 -206 def parser(self, **opts) -> Parser: -207 return self.parser_class( # type: ignore -208 **{ -209 "index_offset": self.index_offset, -210 "unnest_column_only": self.unnest_column_only, -211 "alias_post_tablesample": self.alias_post_tablesample, -212 "null_ordering": self.null_ordering, -213 **opts, -214 }, -215 ) -216 -217 def generator(self, **opts) -> Generator: -218 return self.generator_class( # type: ignore -219 **{ -220 "quote_start": self.quote_start, -221 "quote_end": self.quote_end, -222 "bit_start": self.bit_start, -223 "bit_end": self.bit_end, -224 "hex_start": self.hex_start, -225 "hex_end": self.hex_end, -226 "byte_start": self.byte_start, -227 "byte_end": self.byte_end, -228 "raw_start": self.raw_start, -229 "raw_end": self.raw_end, -230 "identifier_start": self.identifier_start, -231 "identifier_end": self.identifier_end, -232 "string_escape": self.tokenizer_class.STRING_ESCAPES[0], -233 "identifier_escape": self.tokenizer_class.IDENTIFIER_ESCAPES[0], -234 "index_offset": self.index_offset, -235 "time_mapping": self.inverse_time_mapping, -236 "time_trie": self.inverse_time_trie, -237 "unnest_column_only": self.unnest_column_only, -238 "alias_post_tablesample": self.alias_post_tablesample, -239 "identifiers_can_start_with_digit": self.identifiers_can_start_with_digit, -240 "normalize_functions": 
self.normalize_functions, -241 "null_ordering": self.null_ordering, -242 **opts, -243 } -244 ) +186 @classmethod +187 def get_or_raise(cls, dialect: DialectType) -> t.Type[Dialect]: +188 if not dialect: +189 return cls +190 if isinstance(dialect, _Dialect): +191 return dialect +192 if isinstance(dialect, Dialect): +193 return dialect.__class__ +194 +195 result = cls.get(dialect) +196 if not result: +197 raise ValueError(f"Unknown dialect '{dialect}'") +198 +199 return result +200 +201 @classmethod +202 def format_time( +203 cls, expression: t.Optional[str | exp.Expression] +204 ) -> t.Optional[exp.Expression]: +205 if isinstance(expression, str): +206 return exp.Literal.string( +207 # the time formats are quoted +208 format_time(expression[1:-1], cls.TIME_MAPPING, cls.TIME_TRIE) +209 ) +210 +211 if expression and expression.is_string: +212 return exp.Literal.string(format_time(expression.this, cls.TIME_MAPPING, cls.TIME_TRIE)) +213 +214 return expression +215 +216 def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]: +217 return self.parser(**opts).parse(self.tokenize(sql), sql) +218 +219 def parse_into( +220 self, expression_type: exp.IntoType, sql: str, **opts +221 ) -> t.List[t.Optional[exp.Expression]]: +222 return self.parser(**opts).parse_into(expression_type, self.tokenize(sql), sql) +223 +224 def generate(self, expression: t.Optional[exp.Expression], **opts) -> str: +225 return self.generator(**opts).generate(expression) +226 +227 def transpile(self, sql: str, **opts) -> t.List[str]: +228 return [self.generate(expression, **opts) for expression in self.parse(sql)] +229 +230 def tokenize(self, sql: str) -> t.List[Token]: +231 return self.tokenizer.tokenize(sql) +232 +233 @property +234 def tokenizer(self) -> Tokenizer: +235 if not hasattr(self, "_tokenizer"): +236 self._tokenizer = self.tokenizer_class() +237 return self._tokenizer +238 +239 def parser(self, **opts) -> Parser: +240 return self.parser_class(**opts) +241 +242 def generator(self, **opts) -> Generator: +243 return self.generator_class(**opts) +244 245 -246 -247DialectType = t.Union[str, Dialect, t.Type[Dialect], None] +246DialectType = t.Union[str, Dialect, t.Type[Dialect], None] +247 248 -249 -250def rename_func(name: str) -> t.Callable[[Generator, exp.Expression], str]: -251 return lambda self, expression: self.func(name, *flatten(expression.args.values())) +249def rename_func(name: str) -> t.Callable[[Generator, exp.Expression], str]: +250 return lambda self, expression: self.func(name, *flatten(expression.args.values())) +251 252 -253 -254def approx_count_distinct_sql(self: Generator, expression: exp.ApproxDistinct) -> str: -255 if expression.args.get("accuracy"): -256 self.unsupported("APPROX_COUNT_DISTINCT does not support accuracy") -257 return self.func("APPROX_COUNT_DISTINCT", expression.this) +253def approx_count_distinct_sql(self: Generator, expression: exp.ApproxDistinct) -> str: +254 if expression.args.get("accuracy"): +255 self.unsupported("APPROX_COUNT_DISTINCT does not support accuracy") +256 return self.func("APPROX_COUNT_DISTINCT", expression.this) +257 258 -259 -260def if_sql(self: Generator, expression: exp.If) -> str: -261 return self.func( -262 "IF", expression.this, expression.args.get("true"), expression.args.get("false") -263 ) +259def if_sql(self: Generator, expression: exp.If) -> str: +260 return self.func( +261 "IF", expression.this, expression.args.get("true"), expression.args.get("false") +262 ) +263 264 -265 -266def arrow_json_extract_sql(self: Generator, expression: 
exp.JSONExtract | exp.JSONBExtract) -> str: -267 return self.binary(expression, "->") +265def arrow_json_extract_sql(self: Generator, expression: exp.JSONExtract | exp.JSONBExtract) -> str: +266 return self.binary(expression, "->") +267 268 -269 -270def arrow_json_extract_scalar_sql( -271 self: Generator, expression: exp.JSONExtractScalar | exp.JSONBExtractScalar -272) -> str: -273 return self.binary(expression, "->>") +269def arrow_json_extract_scalar_sql( +270 self: Generator, expression: exp.JSONExtractScalar | exp.JSONBExtractScalar +271) -> str: +272 return self.binary(expression, "->>") +273 274 -275 -276def inline_array_sql(self: Generator, expression: exp.Array) -> str: -277 return f"[{self.expressions(expression)}]" +275def inline_array_sql(self: Generator, expression: exp.Array) -> str: +276 return f"[{self.expressions(expression)}]" +277 278 -279 -280def no_ilike_sql(self: Generator, expression: exp.ILike) -> str: -281 return self.like_sql( -282 exp.Like( -283 this=exp.Lower(this=expression.this), -284 expression=expression.args["expression"], -285 ) -286 ) -287 +279def no_ilike_sql(self: Generator, expression: exp.ILike) -> str: +280 return self.like_sql( +281 exp.Like(this=exp.Lower(this=expression.this), expression=expression.expression) +282 ) +283 +284 +285def no_paren_current_date_sql(self: Generator, expression: exp.CurrentDate) -> str: +286 zone = self.sql(expression, "this") +287 return f"CURRENT_DATE AT TIME ZONE {zone}" if zone else "CURRENT_DATE" 288 -289def no_paren_current_date_sql(self: Generator, expression: exp.CurrentDate) -> str: -290 zone = self.sql(expression, "this") -291 return f"CURRENT_DATE AT TIME ZONE {zone}" if zone else "CURRENT_DATE" -292 -293 -294def no_recursive_cte_sql(self: Generator, expression: exp.With) -> str: -295 if expression.args.get("recursive"): -296 self.unsupported("Recursive CTEs are unsupported") -297 expression.args["recursive"] = False -298 return self.with_sql(expression) -299 -300 -301def no_safe_divide_sql(self: Generator, expression: exp.SafeDivide) -> str: -302 n = self.sql(expression, "this") -303 d = self.sql(expression, "expression") -304 return f"IF({d} <> 0, {n} / {d}, NULL)" -305 +289 +290def no_recursive_cte_sql(self: Generator, expression: exp.With) -> str: +291 if expression.args.get("recursive"): +292 self.unsupported("Recursive CTEs are unsupported") +293 expression.args["recursive"] = False +294 return self.with_sql(expression) +295 +296 +297def no_safe_divide_sql(self: Generator, expression: exp.SafeDivide) -> str: +298 n = self.sql(expression, "this") +299 d = self.sql(expression, "expression") +300 return f"IF({d} <> 0, {n} / {d}, NULL)" +301 +302 +303def no_tablesample_sql(self: Generator, expression: exp.TableSample) -> str: +304 self.unsupported("TABLESAMPLE unsupported") +305 return self.sql(expression.this) 306 -307def no_tablesample_sql(self: Generator, expression: exp.TableSample) -> str: -308 self.unsupported("TABLESAMPLE unsupported") -309 return self.sql(expression.this) -310 +307 +308def no_pivot_sql(self: Generator, expression: exp.Pivot) -> str: +309 self.unsupported("PIVOT unsupported") +310 return "" 311 -312def no_pivot_sql(self: Generator, expression: exp.Pivot) -> str: -313 self.unsupported("PIVOT unsupported") -314 return "" +312 +313def no_trycast_sql(self: Generator, expression: exp.TryCast) -> str: +314 return self.cast_sql(expression) 315 316 -317def no_trycast_sql(self: Generator, expression: exp.TryCast) -> str: -318 return self.cast_sql(expression) -319 +317def no_properties_sql(self: 
Generator, expression: exp.Properties) -> str: +318 self.unsupported("Properties unsupported") +319 return "" 320 -321def no_properties_sql(self: Generator, expression: exp.Properties) -> str: -322 self.unsupported("Properties unsupported") -323 return "" -324 -325 -326def no_comment_column_constraint_sql( -327 self: Generator, expression: exp.CommentColumnConstraint -328) -> str: -329 self.unsupported("CommentColumnConstraint unsupported") -330 return "" -331 -332 -333def str_position_sql(self: Generator, expression: exp.StrPosition) -> str: -334 this = self.sql(expression, "this") -335 substr = self.sql(expression, "substr") -336 position = self.sql(expression, "position") -337 if position: -338 return f"STRPOS(SUBSTR({this}, {position}), {substr}) + {position} - 1" -339 return f"STRPOS({this}, {substr})" -340 -341 -342def struct_extract_sql(self: Generator, expression: exp.StructExtract) -> str: -343 this = self.sql(expression, "this") -344 struct_key = self.sql(exp.Identifier(this=expression.expression, quoted=True)) -345 return f"{this}.{struct_key}" -346 -347 -348def var_map_sql( -349 self: Generator, expression: exp.Map | exp.VarMap, map_func_name: str = "MAP" -350) -> str: -351 keys = expression.args["keys"] -352 values = expression.args["values"] +321 +322def no_comment_column_constraint_sql( +323 self: Generator, expression: exp.CommentColumnConstraint +324) -> str: +325 self.unsupported("CommentColumnConstraint unsupported") +326 return "" +327 +328 +329def str_position_sql(self: Generator, expression: exp.StrPosition) -> str: +330 this = self.sql(expression, "this") +331 substr = self.sql(expression, "substr") +332 position = self.sql(expression, "position") +333 if position: +334 return f"STRPOS(SUBSTR({this}, {position}), {substr}) + {position} - 1" +335 return f"STRPOS({this}, {substr})" +336 +337 +338def struct_extract_sql(self: Generator, expression: exp.StructExtract) -> str: +339 this = self.sql(expression, "this") +340 struct_key = self.sql(exp.Identifier(this=expression.expression, quoted=True)) +341 return f"{this}.{struct_key}" +342 +343 +344def var_map_sql( +345 self: Generator, expression: exp.Map | exp.VarMap, map_func_name: str = "MAP" +346) -> str: +347 keys = expression.args["keys"] +348 values = expression.args["values"] +349 +350 if not isinstance(keys, exp.Array) or not isinstance(values, exp.Array): +351 self.unsupported("Cannot convert array columns into map.") +352 return self.func(map_func_name, keys, values) 353 -354 if not isinstance(keys, exp.Array) or not isinstance(values, exp.Array): -355 self.unsupported("Cannot convert array columns into map.") -356 return self.func(map_func_name, keys, values) -357 -358 args = [] -359 for key, value in zip(keys.expressions, values.expressions): -360 args.append(self.sql(key)) -361 args.append(self.sql(value)) -362 return self.func(map_func_name, *args) -363 -364 -365def format_time_lambda( -366 exp_class: t.Type[E], dialect: str, default: t.Optional[bool | str] = None -367) -> t.Callable[[t.List], E]: -368 """Helper used for time expressions. -369 -370 Args: -371 exp_class: the expression class to instantiate. -372 dialect: target sql dialect. -373 default: the default format, True being time. -374 -375 Returns: -376 A callable that can be used to return the appropriately formatted time expression. 
-377 """ -378 -379 def _format_time(args: t.List): -380 return exp_class( -381 this=seq_get(args, 0), -382 format=Dialect[dialect].format_time( -383 seq_get(args, 1) -384 or (Dialect[dialect].time_format if default is True else default or None) -385 ), -386 ) +354 args = [] +355 for key, value in zip(keys.expressions, values.expressions): +356 args.append(self.sql(key)) +357 args.append(self.sql(value)) +358 +359 return self.func(map_func_name, *args) +360 +361 +362def format_time_lambda( +363 exp_class: t.Type[E], dialect: str, default: t.Optional[bool | str] = None +364) -> t.Callable[[t.List], E]: +365 """Helper used for time expressions. +366 +367 Args: +368 exp_class: the expression class to instantiate. +369 dialect: target sql dialect. +370 default: the default format, True being time. +371 +372 Returns: +373 A callable that can be used to return the appropriately formatted time expression. +374 """ +375 +376 def _format_time(args: t.List): +377 return exp_class( +378 this=seq_get(args, 0), +379 format=Dialect[dialect].format_time( +380 seq_get(args, 1) +381 or (Dialect[dialect].TIME_FORMAT if default is True else default or None) +382 ), +383 ) +384 +385 return _format_time +386 387 -388 return _format_time -389 -390 -391def create_with_partitions_sql(self: Generator, expression: exp.Create) -> str: -392 """ -393 In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the -394 PARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding -395 columns are removed from the create statement. -396 """ -397 has_schema = isinstance(expression.this, exp.Schema) -398 is_partitionable = expression.args.get("kind") in ("TABLE", "VIEW") -399 -400 if has_schema and is_partitionable: -401 expression = expression.copy() -402 prop = expression.find(exp.PartitionedByProperty) -403 if prop and prop.this and not isinstance(prop.this, exp.Schema): -404 schema = expression.this -405 columns = {v.name.upper() for v in prop.this.expressions} -406 partitions = [col for col in schema.expressions if col.name.upper() in columns] -407 schema.set("expressions", [e for e in schema.expressions if e not in partitions]) -408 prop.replace(exp.PartitionedByProperty(this=exp.Schema(expressions=partitions))) -409 expression.set("this", schema) +388def create_with_partitions_sql(self: Generator, expression: exp.Create) -> str: +389 """ +390 In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the +391 PARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding +392 columns are removed from the create statement. 
+393 """ +394 has_schema = isinstance(expression.this, exp.Schema) +395 is_partitionable = expression.args.get("kind") in ("TABLE", "VIEW") +396 +397 if has_schema and is_partitionable: +398 expression = expression.copy() +399 prop = expression.find(exp.PartitionedByProperty) +400 if prop and prop.this and not isinstance(prop.this, exp.Schema): +401 schema = expression.this +402 columns = {v.name.upper() for v in prop.this.expressions} +403 partitions = [col for col in schema.expressions if col.name.upper() in columns] +404 schema.set("expressions", [e for e in schema.expressions if e not in partitions]) +405 prop.replace(exp.PartitionedByProperty(this=exp.Schema(expressions=partitions))) +406 expression.set("this", schema) +407 +408 return self.create_sql(expression) +409 410 -411 return self.create_sql(expression) -412 -413 -414def parse_date_delta( -415 exp_class: t.Type[E], unit_mapping: t.Optional[t.Dict[str, str]] = None -416) -> t.Callable[[t.List], E]: -417 def inner_func(args: t.List) -> E: -418 unit_based = len(args) == 3 -419 this = args[2] if unit_based else seq_get(args, 0) -420 unit = args[0] if unit_based else exp.Literal.string("DAY") -421 unit = exp.var(unit_mapping.get(unit.name.lower(), unit.name)) if unit_mapping else unit -422 return exp_class(this=this, expression=seq_get(args, 1), unit=unit) +411def parse_date_delta( +412 exp_class: t.Type[E], unit_mapping: t.Optional[t.Dict[str, str]] = None +413) -> t.Callable[[t.List], E]: +414 def inner_func(args: t.List) -> E: +415 unit_based = len(args) == 3 +416 this = args[2] if unit_based else seq_get(args, 0) +417 unit = args[0] if unit_based else exp.Literal.string("DAY") +418 unit = exp.var(unit_mapping.get(unit.name.lower(), unit.name)) if unit_mapping else unit +419 return exp_class(this=this, expression=seq_get(args, 1), unit=unit) +420 +421 return inner_func +422 423 -424 return inner_func -425 -426 -427def parse_date_delta_with_interval( -428 expression_class: t.Type[E], -429) -> t.Callable[[t.List], t.Optional[E]]: -430 def func(args: t.List) -> t.Optional[E]: -431 if len(args) < 2: -432 return None -433 -434 interval = args[1] -435 expression = interval.this -436 if expression and expression.is_string: -437 expression = exp.Literal.number(expression.this) -438 -439 return expression_class( -440 this=args[0], -441 expression=expression, -442 unit=exp.Literal.string(interval.text("unit")), -443 ) -444 -445 return func +424def parse_date_delta_with_interval( +425 expression_class: t.Type[E], +426) -> t.Callable[[t.List], t.Optional[E]]: +427 def func(args: t.List) -> t.Optional[E]: +428 if len(args) < 2: +429 return None +430 +431 interval = args[1] +432 expression = interval.this +433 if expression and expression.is_string: +434 expression = exp.Literal.number(expression.this) +435 +436 return expression_class( +437 this=args[0], expression=expression, unit=exp.Literal.string(interval.text("unit")) +438 ) +439 +440 return func +441 +442 +443def date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc: +444 unit = seq_get(args, 0) +445 this = seq_get(args, 1) 446 -447 -448def date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc: -449 unit = seq_get(args, 0) -450 this = seq_get(args, 1) +447 if isinstance(this, exp.Cast) and this.is_type("date"): +448 return exp.DateTrunc(unit=unit, this=this) +449 return exp.TimestampTrunc(this=this, unit=unit) +450 451 -452 if isinstance(this, exp.Cast) and this.is_type("date"): -453 return exp.DateTrunc(unit=unit, this=this) -454 return 
exp.TimestampTrunc(this=this, unit=unit) -455 +452def timestamptrunc_sql(self: Generator, expression: exp.TimestampTrunc) -> str: +453 return self.func( +454 "DATE_TRUNC", exp.Literal.string(expression.text("unit") or "day"), expression.this +455 ) 456 -457def timestamptrunc_sql(self: Generator, expression: exp.TimestampTrunc) -> str: -458 return self.func( -459 "DATE_TRUNC", exp.Literal.string(expression.text("unit") or "day"), expression.this -460 ) -461 +457 +458def locate_to_strposition(args: t.List) -> exp.Expression: +459 return exp.StrPosition( +460 this=seq_get(args, 1), substr=seq_get(args, 0), position=seq_get(args, 2) +461 ) 462 -463def locate_to_strposition(args: t.List) -> exp.Expression: -464 return exp.StrPosition( -465 this=seq_get(args, 1), -466 substr=seq_get(args, 0), -467 position=seq_get(args, 2), -468 ) +463 +464def strposition_to_locate_sql(self: Generator, expression: exp.StrPosition) -> str: +465 return self.func( +466 "LOCATE", expression.args.get("substr"), expression.this, expression.args.get("position") +467 ) +468 469 -470 -471def strposition_to_locate_sql(self: Generator, expression: exp.StrPosition) -> str: -472 return self.func( -473 "LOCATE", expression.args.get("substr"), expression.this, expression.args.get("position") -474 ) -475 -476 -477def left_to_substring_sql(self: Generator, expression: exp.Left) -> str: -478 expression = expression.copy() -479 return self.sql( -480 exp.Substring( -481 this=expression.this, start=exp.Literal.number(1), length=expression.expression -482 ) -483 ) -484 -485 -486def right_to_substring_sql(self: Generator, expression: exp.Left) -> str: -487 expression = expression.copy() -488 return self.sql( -489 exp.Substring( -490 this=expression.this, -491 start=exp.Length(this=expression.this) - exp.paren(expression.expression - 1), -492 ) -493 ) -494 +470def left_to_substring_sql(self: Generator, expression: exp.Left) -> str: +471 expression = expression.copy() +472 return self.sql( +473 exp.Substring( +474 this=expression.this, start=exp.Literal.number(1), length=expression.expression +475 ) +476 ) +477 +478 +479def right_to_substring_sql(self: Generator, expression: exp.Left) -> str: +480 expression = expression.copy() +481 return self.sql( +482 exp.Substring( +483 this=expression.this, +484 start=exp.Length(this=expression.this) - exp.paren(expression.expression - 1), +485 ) +486 ) +487 +488 +489def timestrtotime_sql(self: Generator, expression: exp.TimeStrToTime) -> str: +490 return f"CAST({self.sql(expression, 'this')} AS TIMESTAMP)" +491 +492 +493def datestrtodate_sql(self: Generator, expression: exp.DateStrToDate) -> str: +494 return f"CAST({self.sql(expression, 'this')} AS DATE)" 495 -496def timestrtotime_sql(self: Generator, expression: exp.TimeStrToTime) -> str: -497 return f"CAST({self.sql(expression, 'this')} AS TIMESTAMP)" -498 -499 -500def datestrtodate_sql(self: Generator, expression: exp.DateStrToDate) -> str: -501 return f"CAST({self.sql(expression, 'this')} AS DATE)" -502 -503 -504def min_or_least(self: Generator, expression: exp.Min) -> str: -505 name = "LEAST" if expression.expressions else "MIN" -506 return rename_func(name)(self, expression) -507 -508 -509def max_or_greatest(self: Generator, expression: exp.Max) -> str: -510 name = "GREATEST" if expression.expressions else "MAX" -511 return rename_func(name)(self, expression) -512 +496 +497def min_or_least(self: Generator, expression: exp.Min) -> str: +498 name = "LEAST" if expression.expressions else "MIN" +499 return rename_func(name)(self, expression) 
+500 +501 +502def max_or_greatest(self: Generator, expression: exp.Max) -> str: +503 name = "GREATEST" if expression.expressions else "MAX" +504 return rename_func(name)(self, expression) +505 +506 +507def count_if_to_sum(self: Generator, expression: exp.CountIf) -> str: +508 cond = expression.this +509 +510 if isinstance(expression.this, exp.Distinct): +511 cond = expression.this.expressions[0] +512 self.unsupported("DISTINCT is not supported when converting COUNT_IF to SUM") 513 -514def count_if_to_sum(self: Generator, expression: exp.CountIf) -> str: -515 cond = expression.this +514 return self.func("sum", exp.func("if", cond, 1, 0)) +515 516 -517 if isinstance(expression.this, exp.Distinct): -518 cond = expression.this.expressions[0] -519 self.unsupported("DISTINCT is not supported when converting COUNT_IF to SUM") -520 -521 return self.func("sum", exp.func("if", cond, 1, 0)) +517def trim_sql(self: Generator, expression: exp.Trim) -> str: +518 target = self.sql(expression, "this") +519 trim_type = self.sql(expression, "position") +520 remove_chars = self.sql(expression, "expression") +521 collation = self.sql(expression, "collation") 522 -523 -524def trim_sql(self: Generator, expression: exp.Trim) -> str: -525 target = self.sql(expression, "this") -526 trim_type = self.sql(expression, "position") -527 remove_chars = self.sql(expression, "expression") -528 collation = self.sql(expression, "collation") -529 -530 # Use TRIM/LTRIM/RTRIM syntax if the expression isn't database-specific -531 if not remove_chars and not collation: -532 return self.trim_sql(expression) +523 # Use TRIM/LTRIM/RTRIM syntax if the expression isn't database-specific +524 if not remove_chars and not collation: +525 return self.trim_sql(expression) +526 +527 trim_type = f"{trim_type} " if trim_type else "" +528 remove_chars = f"{remove_chars} " if remove_chars else "" +529 from_part = "FROM " if trim_type or remove_chars else "" +530 collation = f" COLLATE {collation}" if collation else "" +531 return f"TRIM({trim_type}{remove_chars}{from_part}{target}{collation})" +532 533 -534 trim_type = f"{trim_type} " if trim_type else "" -535 remove_chars = f"{remove_chars} " if remove_chars else "" -536 from_part = "FROM " if trim_type or remove_chars else "" -537 collation = f" COLLATE {collation}" if collation else "" -538 return f"TRIM({trim_type}{remove_chars}{from_part}{target}{collation})" -539 -540 -541def str_to_time_sql(self: Generator, expression: exp.Expression) -> str: -542 return self.func("STRPTIME", expression.this, self.format_time(expression)) -543 -544 -545def ts_or_ds_to_date_sql(dialect: str) -> t.Callable: -546 def _ts_or_ds_to_date_sql(self: Generator, expression: exp.TsOrDsToDate) -> str: -547 _dialect = Dialect.get_or_raise(dialect) -548 time_format = self.format_time(expression) -549 if time_format and time_format not in (_dialect.time_format, _dialect.date_format): -550 return f"CAST({str_to_time_sql(self, expression)} AS DATE)" -551 return f"CAST({self.sql(expression, 'this')} AS DATE)" -552 -553 return _ts_or_ds_to_date_sql -554 +534def str_to_time_sql(self: Generator, expression: exp.Expression) -> str: +535 return self.func("STRPTIME", expression.this, self.format_time(expression)) +536 +537 +538def ts_or_ds_to_date_sql(dialect: str) -> t.Callable: +539 def _ts_or_ds_to_date_sql(self: Generator, expression: exp.TsOrDsToDate) -> str: +540 _dialect = Dialect.get_or_raise(dialect) +541 time_format = self.format_time(expression) +542 if time_format and time_format not in (_dialect.TIME_FORMAT, 
_dialect.DATE_FORMAT): +543 return f"CAST({str_to_time_sql(self, expression)} AS DATE)" +544 return f"CAST({self.sql(expression, 'this')} AS DATE)" +545 +546 return _ts_or_ds_to_date_sql +547 +548 +549def concat_to_dpipe_sql(self: Generator, expression: exp.Concat | exp.SafeConcat) -> str: +550 this, *rest_args = expression.expressions +551 for arg in rest_args: +552 this = exp.DPipe(this=this, expression=arg) +553 +554 return self.sql(this) 555 -556# Spark, DuckDB use (almost) the same naming scheme for the output columns of the PIVOT operator -557def pivot_column_names(aggregations: t.List[exp.Expression], dialect: DialectType) -> t.List[str]: -558 names = [] -559 for agg in aggregations: -560 if isinstance(agg, exp.Alias): -561 names.append(agg.alias) -562 else: -563 """ -564 This case corresponds to aggregations without aliases being used as suffixes -565 (e.g. col_avg(foo)). We need to unquote identifiers because they're going to -566 be quoted in the base parser's `_parse_pivot` method, due to `to_identifier`. -567 Otherwise, we'd end up with `col_avg(`foo`)` (notice the double quotes). -568 """ -569 agg_all_unquoted = agg.transform( -570 lambda node: exp.Identifier(this=node.name, quoted=False) -571 if isinstance(node, exp.Identifier) -572 else node -573 ) -574 names.append(agg_all_unquoted.sql(dialect=dialect, normalize_functions="lower")) -575 -576 return names +556 +557# Spark, DuckDB use (almost) the same naming scheme for the output columns of the PIVOT operator +558def pivot_column_names(aggregations: t.List[exp.Expression], dialect: DialectType) -> t.List[str]: +559 names = [] +560 for agg in aggregations: +561 if isinstance(agg, exp.Alias): +562 names.append(agg.alias) +563 else: +564 """ +565 This case corresponds to aggregations without aliases being used as suffixes +566 (e.g. col_avg(foo)). We need to unquote identifiers because they're going to +567 be quoted in the base parser's `_parse_pivot` method, due to `to_identifier`. +568 Otherwise, we'd end up with `col_avg(`foo`)` (notice the double quotes). +569 """ +570 agg_all_unquoted = agg.transform( +571 lambda node: exp.Identifier(this=node.name, quoted=False) +572 if isinstance(node, exp.Identifier) +573 else node +574 ) +575 names.append(agg_all_unquoted.sql(dialect=dialect, normalize_functions="lower")) +576 +577 return names
    @@ -862,24 +866,24 @@
26
27    BIGQUERY = "bigquery"
28    CLICKHOUSE = "clickhouse"
-29    DUCKDB = "duckdb"
-30    HIVE = "hive"
-31    MYSQL = "mysql"
-32    ORACLE = "oracle"
-33    POSTGRES = "postgres"
-34    PRESTO = "presto"
-35    REDSHIFT = "redshift"
-36    SNOWFLAKE = "snowflake"
-37    SPARK = "spark"
-38    SPARK2 = "spark2"
-39    SQLITE = "sqlite"
-40    STARROCKS = "starrocks"
-41    TABLEAU = "tableau"
-42    TRINO = "trino"
-43    TSQL = "tsql"
-44    DATABRICKS = "databricks"
-45    DRILL = "drill"
-46    TERADATA = "teradata"
+29    DATABRICKS = "databricks"
+30    DRILL = "drill"
+31    DUCKDB = "duckdb"
+32    HIVE = "hive"
+33    MYSQL = "mysql"
+34    ORACLE = "oracle"
+35    POSTGRES = "postgres"
+36    PRESTO = "presto"
+37    REDSHIFT = "redshift"
+38    SNOWFLAKE = "snowflake"
+39    SPARK = "spark"
+40    SPARK2 = "spark2"
+41    SQLITE = "sqlite"
+42    STARROCKS = "starrocks"
+43    TABLEAU = "tableau"
+44    TERADATA = "teradata"
+45    TRINO = "trino"
+46    TSQL = "tsql"
    @@ -922,6 +926,30 @@
+    DATABRICKS = <Dialects.DATABRICKS: 'databricks'>
+
+    DRILL = <Dialects.DRILL: 'drill'>
    @@ -1079,62 +1107,38 @@
-    TRINO = <Dialects.TRINO: 'trino'>
-    TSQL = <Dialects.TSQL: 'tsql'>
-    DATABRICKS = <Dialects.DATABRICKS: 'databricks'>
+    TERADATA = <Dialects.TERADATA: 'teradata'>
-    DRILL = <Dialects.DRILL: 'drill'>
+    TRINO = <Dialects.TRINO: 'trino'>
-    TERADATA = <Dialects.TERADATA: 'teradata'>
+    TSQL = <Dialects.TSQL: 'tsql'>
    @@ -1211,137 +1215,118 @@
    -
    115class Dialect(metaclass=_Dialect):
    -116    index_offset = 0
    -117    unnest_column_only = False
    -118    alias_post_tablesample = False
    -119    identifiers_can_start_with_digit = False
    -120    normalize_functions: t.Optional[str] = "upper"
    -121    null_ordering = "nulls_are_small"
    -122
    -123    date_format = "'%Y-%m-%d'"
    -124    dateint_format = "'%Y%m%d'"
    -125    time_format = "'%Y-%m-%d %H:%M:%S'"
    -126    time_mapping: t.Dict[str, str] = {}
    -127
    -128    # autofilled
    -129    quote_start = None
    -130    quote_end = None
    -131    identifier_start = None
    -132    identifier_end = None
    -133
    -134    time_trie = None
    -135    inverse_time_mapping = None
    -136    inverse_time_trie = None
    -137    tokenizer_class = None
    -138    parser_class = None
    -139    generator_class = None
    -140
    -141    def __eq__(self, other: t.Any) -> bool:
    -142        return type(self) == other
    -143
    -144    def __hash__(self) -> int:
    -145        return hash(type(self))
    -146
    -147    @classmethod
    -148    def get_or_raise(cls, dialect: DialectType) -> t.Type[Dialect]:
    -149        if not dialect:
    -150            return cls
    -151        if isinstance(dialect, _Dialect):
    -152            return dialect
    -153        if isinstance(dialect, Dialect):
    -154            return dialect.__class__
    +            
    133class Dialect(metaclass=_Dialect):
    +134    # Determines the base index offset for arrays
    +135    INDEX_OFFSET = 0
    +136
    +137    # If true unnest table aliases are considered only as column aliases
    +138    UNNEST_COLUMN_ONLY = False
    +139
    +140    # Determines whether or not the table alias comes after tablesample
    +141    ALIAS_POST_TABLESAMPLE = False
    +142
    +143    # Determines whether or not an unquoted identifier can start with a digit
    +144    IDENTIFIERS_CAN_START_WITH_DIGIT = False
    +145
    +146    # Determines whether or not CONCAT's arguments must be strings
    +147    STRICT_STRING_CONCAT = False
    +148
    +149    # Determines how function names are going to be normalized
    +150    NORMALIZE_FUNCTIONS: bool | str = "upper"
    +151
    +152    # Indicates the default null ordering method to use if not explicitly set
    +153    # Options are: "nulls_are_small", "nulls_are_large", "nulls_are_last"
    +154    NULL_ORDERING = "nulls_are_small"
     155
    -156        result = cls.get(dialect)
    -157        if not result:
    -158            raise ValueError(f"Unknown dialect '{dialect}'")
    +156    DATE_FORMAT = "'%Y-%m-%d'"
    +157    DATEINT_FORMAT = "'%Y%m%d'"
    +158    TIME_FORMAT = "'%Y-%m-%d %H:%M:%S'"
     159
    -160        return result
    -161
    -162    @classmethod
    -163    def format_time(
    -164        cls, expression: t.Optional[str | exp.Expression]
    -165    ) -> t.Optional[exp.Expression]:
    -166        if isinstance(expression, str):
    -167            return exp.Literal.string(
    -168                format_time(
    -169                    expression[1:-1],  # the time formats are quoted
    -170                    cls.time_mapping,
    -171                    cls.time_trie,
    -172                )
    -173            )
    -174        if expression and expression.is_string:
    -175            return exp.Literal.string(
    -176                format_time(
    -177                    expression.this,
    -178                    cls.time_mapping,
    -179                    cls.time_trie,
    -180                )
    -181            )
    -182        return expression
    +160    # Custom time mappings in which the key represents dialect time format
    +161    # and the value represents a python time format
    +162    TIME_MAPPING: t.Dict[str, str] = {}
    +163
    +164    # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_model_rules_date_time
    +165    # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Data-Type-Conversions/Character-to-DATE-Conversion/Forcing-a-FORMAT-on-CAST-for-Converting-Character-to-DATE
    +166    # special syntax cast(x as date format 'yyyy') defaults to time_mapping
    +167    FORMAT_MAPPING: t.Dict[str, str] = {}
    +168
    +169    # Autofilled
    +170    tokenizer_class = Tokenizer
    +171    parser_class = Parser
    +172    generator_class = Generator
    +173
    +174    # A trie of the time_mapping keys
    +175    TIME_TRIE: t.Dict = {}
    +176    FORMAT_TRIE: t.Dict = {}
    +177
    +178    INVERSE_TIME_MAPPING: t.Dict[str, str] = {}
    +179    INVERSE_TIME_TRIE: t.Dict = {}
    +180
    +181    def __eq__(self, other: t.Any) -> bool:
    +182        return type(self) == other
     183
    -184    def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
    -185        return self.parser(**opts).parse(self.tokenize(sql), sql)
    +184    def __hash__(self) -> int:
    +185        return hash(type(self))
     186
    -187    def parse_into(
    -188        self, expression_type: exp.IntoType, sql: str, **opts
    -189    ) -> t.List[t.Optional[exp.Expression]]:
    -190        return self.parser(**opts).parse_into(expression_type, self.tokenize(sql), sql)
    -191
    -192    def generate(self, expression: t.Optional[exp.Expression], **opts) -> str:
    -193        return self.generator(**opts).generate(expression)
    -194
    -195    def transpile(self, sql: str, **opts) -> t.List[str]:
    -196        return [self.generate(expression, **opts) for expression in self.parse(sql)]
    -197
    -198    def tokenize(self, sql: str) -> t.List[Token]:
    -199        return self.tokenizer.tokenize(sql)
    -200
    -201    @property
    -202    def tokenizer(self) -> Tokenizer:
    -203        if not hasattr(self, "_tokenizer"):
    -204            self._tokenizer = self.tokenizer_class()  # type: ignore
    -205        return self._tokenizer
    -206
    -207    def parser(self, **opts) -> Parser:
    -208        return self.parser_class(  # type: ignore
    -209            **{
    -210                "index_offset": self.index_offset,
    -211                "unnest_column_only": self.unnest_column_only,
    -212                "alias_post_tablesample": self.alias_post_tablesample,
    -213                "null_ordering": self.null_ordering,
    -214                **opts,
    -215            },
    -216        )
    -217
    -218    def generator(self, **opts) -> Generator:
    -219        return self.generator_class(  # type: ignore
    -220            **{
    -221                "quote_start": self.quote_start,
    -222                "quote_end": self.quote_end,
    -223                "bit_start": self.bit_start,
    -224                "bit_end": self.bit_end,
    -225                "hex_start": self.hex_start,
    -226                "hex_end": self.hex_end,
    -227                "byte_start": self.byte_start,
    -228                "byte_end": self.byte_end,
    -229                "raw_start": self.raw_start,
    -230                "raw_end": self.raw_end,
    -231                "identifier_start": self.identifier_start,
    -232                "identifier_end": self.identifier_end,
    -233                "string_escape": self.tokenizer_class.STRING_ESCAPES[0],
    -234                "identifier_escape": self.tokenizer_class.IDENTIFIER_ESCAPES[0],
    -235                "index_offset": self.index_offset,
    -236                "time_mapping": self.inverse_time_mapping,
    -237                "time_trie": self.inverse_time_trie,
    -238                "unnest_column_only": self.unnest_column_only,
    -239                "alias_post_tablesample": self.alias_post_tablesample,
    -240                "identifiers_can_start_with_digit": self.identifiers_can_start_with_digit,
    -241                "normalize_functions": self.normalize_functions,
    -242                "null_ordering": self.null_ordering,
    -243                **opts,
    -244            }
    -245        )
    +187    @classmethod
    +188    def get_or_raise(cls, dialect: DialectType) -> t.Type[Dialect]:
    +189        if not dialect:
    +190            return cls
    +191        if isinstance(dialect, _Dialect):
    +192            return dialect
    +193        if isinstance(dialect, Dialect):
    +194            return dialect.__class__
    +195
    +196        result = cls.get(dialect)
    +197        if not result:
    +198            raise ValueError(f"Unknown dialect '{dialect}'")
    +199
    +200        return result
    +201
    +202    @classmethod
    +203    def format_time(
    +204        cls, expression: t.Optional[str | exp.Expression]
    +205    ) -> t.Optional[exp.Expression]:
    +206        if isinstance(expression, str):
    +207            return exp.Literal.string(
    +208                # the time formats are quoted
    +209                format_time(expression[1:-1], cls.TIME_MAPPING, cls.TIME_TRIE)
    +210            )
    +211
    +212        if expression and expression.is_string:
    +213            return exp.Literal.string(format_time(expression.this, cls.TIME_MAPPING, cls.TIME_TRIE))
    +214
    +215        return expression
    +216
    +217    def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
    +218        return self.parser(**opts).parse(self.tokenize(sql), sql)
    +219
    +220    def parse_into(
    +221        self, expression_type: exp.IntoType, sql: str, **opts
    +222    ) -> t.List[t.Optional[exp.Expression]]:
    +223        return self.parser(**opts).parse_into(expression_type, self.tokenize(sql), sql)
    +224
    +225    def generate(self, expression: t.Optional[exp.Expression], **opts) -> str:
    +226        return self.generator(**opts).generate(expression)
    +227
    +228    def transpile(self, sql: str, **opts) -> t.List[str]:
    +229        return [self.generate(expression, **opts) for expression in self.parse(sql)]
    +230
    +231    def tokenize(self, sql: str) -> t.List[Token]:
    +232        return self.tokenizer.tokenize(sql)
    +233
    +234    @property
    +235    def tokenizer(self) -> Tokenizer:
    +236        if not hasattr(self, "_tokenizer"):
    +237            self._tokenizer = self.tokenizer_class()
    +238        return self._tokenizer
    +239
    +240    def parser(self, **opts) -> Parser:
    +241        return self.parser_class(**opts)
    +242
    +243    def generator(self, **opts) -> Generator:
    +244        return self.generator_class(**opts)
     
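The hunk above replaces the old lowercase, instance-style settings with uppercase class-level constants and lets parser()/generator() forward **opts directly instead of re-mapping dialect attributes. A minimal sketch of how a custom dialect might declare these settings under the new naming; the dialect name and values are illustrative, not taken from this patch.

    from sqlglot.dialects.dialect import Dialect

    class Custom(Dialect):  # hypothetical dialect, for illustration only
        NULL_ORDERING = "nulls_are_last"
        DATE_FORMAT = "'yyyy-MM-dd'"
        TIME_MAPPING = {"yyyy": "%Y", "MM": "%m", "dd": "%d"}

    # The _Dialect metaclass registers the subclass under its lowercased name,
    # so it can be looked up and its TIME_MAPPING used to translate formats.
    fmt = Custom.format_time("'yyyy-MM-dd'")
    print(fmt)  # expected to render as the string literal '%Y-%m-%d'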
    @@ -1359,20 +1344,20 @@
    -
    147    @classmethod
    -148    def get_or_raise(cls, dialect: DialectType) -> t.Type[Dialect]:
    -149        if not dialect:
    -150            return cls
    -151        if isinstance(dialect, _Dialect):
    -152            return dialect
    -153        if isinstance(dialect, Dialect):
    -154            return dialect.__class__
    -155
    -156        result = cls.get(dialect)
    -157        if not result:
    -158            raise ValueError(f"Unknown dialect '{dialect}'")
    -159
    -160        return result
    +            
    187    @classmethod
    +188    def get_or_raise(cls, dialect: DialectType) -> t.Type[Dialect]:
    +189        if not dialect:
    +190            return cls
    +191        if isinstance(dialect, _Dialect):
    +192            return dialect
    +193        if isinstance(dialect, Dialect):
    +194            return dialect.__class__
    +195
    +196        result = cls.get(dialect)
    +197        if not result:
    +198            raise ValueError(f"Unknown dialect '{dialect}'")
    +199
    +200        return result
     
    @@ -1391,27 +1376,20 @@
    -
    162    @classmethod
    -163    def format_time(
    -164        cls, expression: t.Optional[str | exp.Expression]
    -165    ) -> t.Optional[exp.Expression]:
    -166        if isinstance(expression, str):
    -167            return exp.Literal.string(
    -168                format_time(
    -169                    expression[1:-1],  # the time formats are quoted
    -170                    cls.time_mapping,
    -171                    cls.time_trie,
    -172                )
    -173            )
    -174        if expression and expression.is_string:
    -175            return exp.Literal.string(
    -176                format_time(
    -177                    expression.this,
    -178                    cls.time_mapping,
    -179                    cls.time_trie,
    -180                )
    -181            )
    -182        return expression
    +            
    202    @classmethod
    +203    def format_time(
    +204        cls, expression: t.Optional[str | exp.Expression]
    +205    ) -> t.Optional[exp.Expression]:
    +206        if isinstance(expression, str):
    +207            return exp.Literal.string(
    +208                # the time formats are quoted
    +209                format_time(expression[1:-1], cls.TIME_MAPPING, cls.TIME_TRIE)
    +210            )
    +211
    +212        if expression and expression.is_string:
    +213            return exp.Literal.string(format_time(expression.this, cls.TIME_MAPPING, cls.TIME_TRIE))
    +214
    +215        return expression
     
    @@ -1429,8 +1407,8 @@
    -
    184    def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
    -185        return self.parser(**opts).parse(self.tokenize(sql), sql)
    +            
    217    def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
    +218        return self.parser(**opts).parse(self.tokenize(sql), sql)
     
    @@ -1448,10 +1426,10 @@
    -
    187    def parse_into(
    -188        self, expression_type: exp.IntoType, sql: str, **opts
    -189    ) -> t.List[t.Optional[exp.Expression]]:
    -190        return self.parser(**opts).parse_into(expression_type, self.tokenize(sql), sql)
    +            
    220    def parse_into(
    +221        self, expression_type: exp.IntoType, sql: str, **opts
    +222    ) -> t.List[t.Optional[exp.Expression]]:
    +223        return self.parser(**opts).parse_into(expression_type, self.tokenize(sql), sql)
     
    @@ -1469,8 +1447,8 @@
    -
    192    def generate(self, expression: t.Optional[exp.Expression], **opts) -> str:
    -193        return self.generator(**opts).generate(expression)
    +            
    225    def generate(self, expression: t.Optional[exp.Expression], **opts) -> str:
    +226        return self.generator(**opts).generate(expression)
     
    @@ -1488,8 +1466,8 @@
    -
    195    def transpile(self, sql: str, **opts) -> t.List[str]:
    -196        return [self.generate(expression, **opts) for expression in self.parse(sql)]
    +            
    228    def transpile(self, sql: str, **opts) -> t.List[str]:
    +229        return [self.generate(expression, **opts) for expression in self.parse(sql)]
     
    @@ -1507,8 +1485,8 @@
    -
    198    def tokenize(self, sql: str) -> t.List[Token]:
    -199        return self.tokenizer.tokenize(sql)
    +            
    231    def tokenize(self, sql: str) -> t.List[Token]:
    +232        return self.tokenizer.tokenize(sql)
     
    @@ -1526,16 +1504,8 @@
    -
    207    def parser(self, **opts) -> Parser:
    -208        return self.parser_class(  # type: ignore
    -209            **{
    -210                "index_offset": self.index_offset,
    -211                "unnest_column_only": self.unnest_column_only,
    -212                "alias_post_tablesample": self.alias_post_tablesample,
    -213                "null_ordering": self.null_ordering,
    -214                **opts,
    -215            },
    -216        )
    +            
    240    def parser(self, **opts) -> Parser:
    +241        return self.parser_class(**opts)
     
    @@ -1553,34 +1523,8 @@
    -
    218    def generator(self, **opts) -> Generator:
    -219        return self.generator_class(  # type: ignore
    -220            **{
    -221                "quote_start": self.quote_start,
    -222                "quote_end": self.quote_end,
    -223                "bit_start": self.bit_start,
    -224                "bit_end": self.bit_end,
    -225                "hex_start": self.hex_start,
    -226                "hex_end": self.hex_end,
    -227                "byte_start": self.byte_start,
    -228                "byte_end": self.byte_end,
    -229                "raw_start": self.raw_start,
    -230                "raw_end": self.raw_end,
    -231                "identifier_start": self.identifier_start,
    -232                "identifier_end": self.identifier_end,
    -233                "string_escape": self.tokenizer_class.STRING_ESCAPES[0],
    -234                "identifier_escape": self.tokenizer_class.IDENTIFIER_ESCAPES[0],
    -235                "index_offset": self.index_offset,
    -236                "time_mapping": self.inverse_time_mapping,
    -237                "time_trie": self.inverse_time_trie,
    -238                "unnest_column_only": self.unnest_column_only,
    -239                "alias_post_tablesample": self.alias_post_tablesample,
    -240                "identifiers_can_start_with_digit": self.identifiers_can_start_with_digit,
    -241                "normalize_functions": self.normalize_functions,
    -242                "null_ordering": self.null_ordering,
    -243                **opts,
    -244            }
    -245        )
    +            
    243    def generator(self, **opts) -> Generator:
    +244        return self.generator_class(**opts)
     
    @@ -1599,8 +1543,8 @@
    -
    251def rename_func(name: str) -> t.Callable[[Generator, exp.Expression], str]:
    -252    return lambda self, expression: self.func(name, *flatten(expression.args.values()))
    +            
    250def rename_func(name: str) -> t.Callable[[Generator, exp.Expression], str]:
    +251    return lambda self, expression: self.func(name, *flatten(expression.args.values()))
     
    @@ -1618,10 +1562,10 @@
    -
    255def approx_count_distinct_sql(self: Generator, expression: exp.ApproxDistinct) -> str:
    -256    if expression.args.get("accuracy"):
    -257        self.unsupported("APPROX_COUNT_DISTINCT does not support accuracy")
    -258    return self.func("APPROX_COUNT_DISTINCT", expression.this)
    +            
    254def approx_count_distinct_sql(self: Generator, expression: exp.ApproxDistinct) -> str:
    +255    if expression.args.get("accuracy"):
    +256        self.unsupported("APPROX_COUNT_DISTINCT does not support accuracy")
    +257    return self.func("APPROX_COUNT_DISTINCT", expression.this)
     
    @@ -1639,10 +1583,10 @@
    -
    261def if_sql(self: Generator, expression: exp.If) -> str:
    -262    return self.func(
    -263        "IF", expression.this, expression.args.get("true"), expression.args.get("false")
    -264    )
    +            
    260def if_sql(self: Generator, expression: exp.If) -> str:
    +261    return self.func(
    +262        "IF", expression.this, expression.args.get("true"), expression.args.get("false")
    +263    )
     
    @@ -1660,8 +1604,8 @@
    -
    267def arrow_json_extract_sql(self: Generator, expression: exp.JSONExtract | exp.JSONBExtract) -> str:
    -268    return self.binary(expression, "->")
    +            
    266def arrow_json_extract_sql(self: Generator, expression: exp.JSONExtract | exp.JSONBExtract) -> str:
    +267    return self.binary(expression, "->")
     
    @@ -1679,10 +1623,10 @@
    -
    271def arrow_json_extract_scalar_sql(
    -272    self: Generator, expression: exp.JSONExtractScalar | exp.JSONBExtractScalar
    -273) -> str:
    -274    return self.binary(expression, "->>")
    +            
    270def arrow_json_extract_scalar_sql(
    +271    self: Generator, expression: exp.JSONExtractScalar | exp.JSONBExtractScalar
    +272) -> str:
    +273    return self.binary(expression, "->>")
     
    @@ -1700,8 +1644,8 @@
    -
    277def inline_array_sql(self: Generator, expression: exp.Array) -> str:
    -278    return f"[{self.expressions(expression)}]"
    +            
    276def inline_array_sql(self: Generator, expression: exp.Array) -> str:
    +277    return f"[{self.expressions(expression)}]"
     
    @@ -1719,13 +1663,10 @@
    -
    281def no_ilike_sql(self: Generator, expression: exp.ILike) -> str:
    -282    return self.like_sql(
    -283        exp.Like(
    -284            this=exp.Lower(this=expression.this),
    -285            expression=expression.args["expression"],
    -286        )
    -287    )
    +            
    280def no_ilike_sql(self: Generator, expression: exp.ILike) -> str:
    +281    return self.like_sql(
    +282        exp.Like(this=exp.Lower(this=expression.this), expression=expression.expression)
    +283    )
     
    @@ -1743,9 +1684,9 @@
    -
    290def no_paren_current_date_sql(self: Generator, expression: exp.CurrentDate) -> str:
    -291    zone = self.sql(expression, "this")
    -292    return f"CURRENT_DATE AT TIME ZONE {zone}" if zone else "CURRENT_DATE"
    +            
    286def no_paren_current_date_sql(self: Generator, expression: exp.CurrentDate) -> str:
    +287    zone = self.sql(expression, "this")
    +288    return f"CURRENT_DATE AT TIME ZONE {zone}" if zone else "CURRENT_DATE"
     
    @@ -1763,11 +1704,11 @@
    -
    295def no_recursive_cte_sql(self: Generator, expression: exp.With) -> str:
    -296    if expression.args.get("recursive"):
    -297        self.unsupported("Recursive CTEs are unsupported")
    -298        expression.args["recursive"] = False
    -299    return self.with_sql(expression)
    +            
    291def no_recursive_cte_sql(self: Generator, expression: exp.With) -> str:
    +292    if expression.args.get("recursive"):
    +293        self.unsupported("Recursive CTEs are unsupported")
    +294        expression.args["recursive"] = False
    +295    return self.with_sql(expression)
     
    @@ -1785,10 +1726,10 @@
    -
    302def no_safe_divide_sql(self: Generator, expression: exp.SafeDivide) -> str:
    -303    n = self.sql(expression, "this")
    -304    d = self.sql(expression, "expression")
    -305    return f"IF({d} <> 0, {n} / {d}, NULL)"
    +            
    298def no_safe_divide_sql(self: Generator, expression: exp.SafeDivide) -> str:
    +299    n = self.sql(expression, "this")
    +300    d = self.sql(expression, "expression")
    +301    return f"IF({d} <> 0, {n} / {d}, NULL)"
     
    @@ -1806,9 +1747,9 @@
    -
    308def no_tablesample_sql(self: Generator, expression: exp.TableSample) -> str:
    -309    self.unsupported("TABLESAMPLE unsupported")
    -310    return self.sql(expression.this)
    +            
    304def no_tablesample_sql(self: Generator, expression: exp.TableSample) -> str:
    +305    self.unsupported("TABLESAMPLE unsupported")
    +306    return self.sql(expression.this)
     
    @@ -1826,9 +1767,9 @@
    -
    313def no_pivot_sql(self: Generator, expression: exp.Pivot) -> str:
    -314    self.unsupported("PIVOT unsupported")
    -315    return ""
    +            
    309def no_pivot_sql(self: Generator, expression: exp.Pivot) -> str:
    +310    self.unsupported("PIVOT unsupported")
    +311    return ""
     
    @@ -1846,8 +1787,8 @@
    -
    318def no_trycast_sql(self: Generator, expression: exp.TryCast) -> str:
    -319    return self.cast_sql(expression)
    +            
    314def no_trycast_sql(self: Generator, expression: exp.TryCast) -> str:
    +315    return self.cast_sql(expression)
     
    @@ -1865,9 +1806,9 @@
    -
    322def no_properties_sql(self: Generator, expression: exp.Properties) -> str:
    -323    self.unsupported("Properties unsupported")
    -324    return ""
    +            
    318def no_properties_sql(self: Generator, expression: exp.Properties) -> str:
    +319    self.unsupported("Properties unsupported")
    +320    return ""
     
    @@ -1885,11 +1826,11 @@
    -
    327def no_comment_column_constraint_sql(
    -328    self: Generator, expression: exp.CommentColumnConstraint
    -329) -> str:
    -330    self.unsupported("CommentColumnConstraint unsupported")
    -331    return ""
    +            
    323def no_comment_column_constraint_sql(
    +324    self: Generator, expression: exp.CommentColumnConstraint
    +325) -> str:
    +326    self.unsupported("CommentColumnConstraint unsupported")
    +327    return ""
     
    @@ -1907,13 +1848,13 @@
    -
    334def str_position_sql(self: Generator, expression: exp.StrPosition) -> str:
    -335    this = self.sql(expression, "this")
    -336    substr = self.sql(expression, "substr")
    -337    position = self.sql(expression, "position")
    -338    if position:
    -339        return f"STRPOS(SUBSTR({this}, {position}), {substr}) + {position} - 1"
    -340    return f"STRPOS({this}, {substr})"
    +            
    330def str_position_sql(self: Generator, expression: exp.StrPosition) -> str:
    +331    this = self.sql(expression, "this")
    +332    substr = self.sql(expression, "substr")
    +333    position = self.sql(expression, "position")
    +334    if position:
    +335        return f"STRPOS(SUBSTR({this}, {position}), {substr}) + {position} - 1"
    +336    return f"STRPOS({this}, {substr})"
     
    @@ -1931,10 +1872,10 @@
    -
    343def struct_extract_sql(self: Generator, expression: exp.StructExtract) -> str:
    -344    this = self.sql(expression, "this")
    -345    struct_key = self.sql(exp.Identifier(this=expression.expression, quoted=True))
    -346    return f"{this}.{struct_key}"
    +            
    339def struct_extract_sql(self: Generator, expression: exp.StructExtract) -> str:
    +340    this = self.sql(expression, "this")
    +341    struct_key = self.sql(exp.Identifier(this=expression.expression, quoted=True))
    +342    return f"{this}.{struct_key}"
     
    @@ -1952,21 +1893,22 @@
    -
    349def var_map_sql(
    -350    self: Generator, expression: exp.Map | exp.VarMap, map_func_name: str = "MAP"
    -351) -> str:
    -352    keys = expression.args["keys"]
    -353    values = expression.args["values"]
    +            
    345def var_map_sql(
    +346    self: Generator, expression: exp.Map | exp.VarMap, map_func_name: str = "MAP"
    +347) -> str:
    +348    keys = expression.args["keys"]
    +349    values = expression.args["values"]
    +350
    +351    if not isinstance(keys, exp.Array) or not isinstance(values, exp.Array):
    +352        self.unsupported("Cannot convert array columns into map.")
    +353        return self.func(map_func_name, keys, values)
     354
    -355    if not isinstance(keys, exp.Array) or not isinstance(values, exp.Array):
    -356        self.unsupported("Cannot convert array columns into map.")
    -357        return self.func(map_func_name, keys, values)
    -358
    -359    args = []
    -360    for key, value in zip(keys.expressions, values.expressions):
    -361        args.append(self.sql(key))
    -362        args.append(self.sql(value))
    -363    return self.func(map_func_name, *args)
    +355    args = []
    +356    for key, value in zip(keys.expressions, values.expressions):
    +357        args.append(self.sql(key))
    +358        args.append(self.sql(value))
    +359
    +360    return self.func(map_func_name, *args)
     
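var_map_sql flattens a map's key array and value array into one interleaved argument list before emitting the MAP-style function call. A plain-Python sketch of that interleaving, using string stand-ins for the rendered key/value SQL (the values are made up for illustration).

    # Stand-ins for self.sql(key) / self.sql(value) results.
    keys = ["'a'", "'b'"]
    values = ["1", "2"]

    args = []
    for key, value in zip(keys, values):
        args.append(key)
        args.append(value)

    print(f"MAP({', '.join(args)})")  # MAP('a', 1, 'b', 2)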
    @@ -1978,36 +1920,36 @@
def
-format_time_lambda( exp_class: Type[~E], dialect: str, default: Union[bool, str, NoneType] = None) -> Callable[[List], ~E]:
+format_time_lambda( exp_class: Type[~E], dialect: str, default: Union[str, bool, NoneType] = None) -> Callable[[List], ~E]:
    -
    366def format_time_lambda(
    -367    exp_class: t.Type[E], dialect: str, default: t.Optional[bool | str] = None
    -368) -> t.Callable[[t.List], E]:
    -369    """Helper used for time expressions.
    -370
    -371    Args:
    -372        exp_class: the expression class to instantiate.
    -373        dialect: target sql dialect.
    -374        default: the default format, True being time.
    -375
    -376    Returns:
    -377        A callable that can be used to return the appropriately formatted time expression.
    -378    """
    -379
    -380    def _format_time(args: t.List):
    -381        return exp_class(
    -382            this=seq_get(args, 0),
    -383            format=Dialect[dialect].format_time(
    -384                seq_get(args, 1)
    -385                or (Dialect[dialect].time_format if default is True else default or None)
    -386            ),
    -387        )
    -388
    -389    return _format_time
    +            
    363def format_time_lambda(
    +364    exp_class: t.Type[E], dialect: str, default: t.Optional[bool | str] = None
    +365) -> t.Callable[[t.List], E]:
    +366    """Helper used for time expressions.
    +367
    +368    Args:
    +369        exp_class: the expression class to instantiate.
    +370        dialect: target sql dialect.
    +371        default: the default format, True being time.
    +372
    +373    Returns:
    +374        A callable that can be used to return the appropriately formatted time expression.
    +375    """
    +376
    +377    def _format_time(args: t.List):
    +378        return exp_class(
    +379            this=seq_get(args, 0),
    +380            format=Dialect[dialect].format_time(
    +381                seq_get(args, 1)
    +382                or (Dialect[dialect].TIME_FORMAT if default is True else default or None)
    +383            ),
    +384        )
    +385
    +386    return _format_time
     
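As the docstring says, format_time_lambda returns a parser callback that builds a time expression and runs the optional format argument (or the dialect default) through Dialect.format_time. A hedged sketch of how a dialect parser might register it; the "TO_TIMESTAMP" key and target expression are assumptions for illustration, not taken from this patch.

    from sqlglot import exp
    from sqlglot.dialects.dialect import format_time_lambda

    # Hypothetical FUNCTIONS entry mirroring the registration pattern used by dialects.
    FUNCTIONS = {
        "TO_TIMESTAMP": format_time_lambda(exp.StrToTime, "hive", default=True),
    }

    # The callback receives the already-parsed argument list.
    node = FUNCTIONS["TO_TIMESTAMP"]([exp.column("x")])
    print(type(node).__name__)  # StrToTime; its format falls back to the dialect's TIME_FORMAT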
    @@ -2041,27 +1983,27 @@
    -
    392def create_with_partitions_sql(self: Generator, expression: exp.Create) -> str:
    -393    """
    -394    In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the
    -395    PARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding
    -396    columns are removed from the create statement.
    -397    """
    -398    has_schema = isinstance(expression.this, exp.Schema)
    -399    is_partitionable = expression.args.get("kind") in ("TABLE", "VIEW")
    -400
    -401    if has_schema and is_partitionable:
    -402        expression = expression.copy()
    -403        prop = expression.find(exp.PartitionedByProperty)
    -404        if prop and prop.this and not isinstance(prop.this, exp.Schema):
    -405            schema = expression.this
    -406            columns = {v.name.upper() for v in prop.this.expressions}
    -407            partitions = [col for col in schema.expressions if col.name.upper() in columns]
    -408            schema.set("expressions", [e for e in schema.expressions if e not in partitions])
    -409            prop.replace(exp.PartitionedByProperty(this=exp.Schema(expressions=partitions)))
    -410            expression.set("this", schema)
    -411
    -412    return self.create_sql(expression)
    +            
    389def create_with_partitions_sql(self: Generator, expression: exp.Create) -> str:
    +390    """
    +391    In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the
    +392    PARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding
    +393    columns are removed from the create statement.
    +394    """
    +395    has_schema = isinstance(expression.this, exp.Schema)
    +396    is_partitionable = expression.args.get("kind") in ("TABLE", "VIEW")
    +397
    +398    if has_schema and is_partitionable:
    +399        expression = expression.copy()
    +400        prop = expression.find(exp.PartitionedByProperty)
    +401        if prop and prop.this and not isinstance(prop.this, exp.Schema):
    +402            schema = expression.this
    +403            columns = {v.name.upper() for v in prop.this.expressions}
    +404            partitions = [col for col in schema.expressions if col.name.upper() in columns]
    +405            schema.set("expressions", [e for e in schema.expressions if e not in partitions])
    +406            prop.replace(exp.PartitionedByProperty(this=exp.Schema(expressions=partitions)))
    +407            expression.set("this", schema)
    +408
    +409    return self.create_sql(expression)
     
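The docstring above describes the schema rewrite; here is an illustrative before/after pair for the Hive/Spark output shape. The SQL strings are examples of the intent, not literal output captured from this release.

    # Column `b` is declared in the table schema and referenced by PARTITIONED BY,
    # so its definition is moved into the PARTITIONED BY schema (illustrative).
    before = "CREATE TABLE t (a INT, b STRING) PARTITIONED BY (b)"
    after = "CREATE TABLE t (a INT) PARTITIONED BY (b STRING)"
    print(before, "->", after)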
    @@ -2083,17 +2025,17 @@ columns are removed from the create statement.

    -
    415def parse_date_delta(
    -416    exp_class: t.Type[E], unit_mapping: t.Optional[t.Dict[str, str]] = None
    -417) -> t.Callable[[t.List], E]:
    -418    def inner_func(args: t.List) -> E:
    -419        unit_based = len(args) == 3
    -420        this = args[2] if unit_based else seq_get(args, 0)
    -421        unit = args[0] if unit_based else exp.Literal.string("DAY")
    -422        unit = exp.var(unit_mapping.get(unit.name.lower(), unit.name)) if unit_mapping else unit
    -423        return exp_class(this=this, expression=seq_get(args, 1), unit=unit)
    -424
    -425    return inner_func
    +            
    412def parse_date_delta(
    +413    exp_class: t.Type[E], unit_mapping: t.Optional[t.Dict[str, str]] = None
    +414) -> t.Callable[[t.List], E]:
    +415    def inner_func(args: t.List) -> E:
    +416        unit_based = len(args) == 3
    +417        this = args[2] if unit_based else seq_get(args, 0)
    +418        unit = args[0] if unit_based else exp.Literal.string("DAY")
    +419        unit = exp.var(unit_mapping.get(unit.name.lower(), unit.name)) if unit_mapping else unit
    +420        return exp_class(this=this, expression=seq_get(args, 1), unit=unit)
    +421
    +422    return inner_func
     
    @@ -2111,25 +2053,23 @@ columns are removed from the create statement.

    -
    428def parse_date_delta_with_interval(
    -429    expression_class: t.Type[E],
    -430) -> t.Callable[[t.List], t.Optional[E]]:
    -431    def func(args: t.List) -> t.Optional[E]:
    -432        if len(args) < 2:
    -433            return None
    -434
    -435        interval = args[1]
    -436        expression = interval.this
    -437        if expression and expression.is_string:
    -438            expression = exp.Literal.number(expression.this)
    -439
    -440        return expression_class(
    -441            this=args[0],
    -442            expression=expression,
    -443            unit=exp.Literal.string(interval.text("unit")),
    -444        )
    -445
    -446    return func
    +            
    425def parse_date_delta_with_interval(
    +426    expression_class: t.Type[E],
    +427) -> t.Callable[[t.List], t.Optional[E]]:
    +428    def func(args: t.List) -> t.Optional[E]:
    +429        if len(args) < 2:
    +430            return None
    +431
    +432        interval = args[1]
    +433        expression = interval.this
    +434        if expression and expression.is_string:
    +435            expression = exp.Literal.number(expression.this)
    +436
    +437        return expression_class(
    +438            this=args[0], expression=expression, unit=exp.Literal.string(interval.text("unit"))
    +439        )
    +440
    +441    return func
     
    @@ -2147,13 +2087,13 @@ columns are removed from the create statement.

    -
    449def date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
    -450    unit = seq_get(args, 0)
    -451    this = seq_get(args, 1)
    -452
    -453    if isinstance(this, exp.Cast) and this.is_type("date"):
    -454        return exp.DateTrunc(unit=unit, this=this)
    -455    return exp.TimestampTrunc(this=this, unit=unit)
    +            
    444def date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
    +445    unit = seq_get(args, 0)
    +446    this = seq_get(args, 1)
    +447
    +448    if isinstance(this, exp.Cast) and this.is_type("date"):
    +449        return exp.DateTrunc(unit=unit, this=this)
    +450    return exp.TimestampTrunc(this=this, unit=unit)
     
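date_trunc_to_time chooses between DateTrunc and TimestampTrunc depending on whether the second argument is already cast to DATE. A small sketch with hand-built arguments (the column and unit are assumed inputs for illustration).

    from sqlglot import exp
    from sqlglot.dialects.dialect import date_trunc_to_time

    unit = exp.Literal.string("day")
    print(type(date_trunc_to_time([unit, exp.cast(exp.column("x"), "date")])).__name__)  # DateTrunc
    print(type(date_trunc_to_time([unit, exp.column("x")])).__name__)                    # TimestampTrunc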
    @@ -2171,10 +2111,10 @@ columns are removed from the create statement.

    -
    458def timestamptrunc_sql(self: Generator, expression: exp.TimestampTrunc) -> str:
    -459    return self.func(
    -460        "DATE_TRUNC", exp.Literal.string(expression.text("unit") or "day"), expression.this
    -461    )
    +            
    453def timestamptrunc_sql(self: Generator, expression: exp.TimestampTrunc) -> str:
    +454    return self.func(
    +455        "DATE_TRUNC", exp.Literal.string(expression.text("unit") or "day"), expression.this
    +456    )
     
    @@ -2192,12 +2132,10 @@ columns are removed from the create statement.

    -
    464def locate_to_strposition(args: t.List) -> exp.Expression:
    -465    return exp.StrPosition(
    -466        this=seq_get(args, 1),
    -467        substr=seq_get(args, 0),
    -468        position=seq_get(args, 2),
    -469    )
    +            
    459def locate_to_strposition(args: t.List) -> exp.Expression:
    +460    return exp.StrPosition(
    +461        this=seq_get(args, 1), substr=seq_get(args, 0), position=seq_get(args, 2)
    +462    )
     
    @@ -2215,10 +2153,10 @@ columns are removed from the create statement.

    -
    472def strposition_to_locate_sql(self: Generator, expression: exp.StrPosition) -> str:
    -473    return self.func(
    -474        "LOCATE", expression.args.get("substr"), expression.this, expression.args.get("position")
    -475    )
    +            
    465def strposition_to_locate_sql(self: Generator, expression: exp.StrPosition) -> str:
    +466    return self.func(
    +467        "LOCATE", expression.args.get("substr"), expression.this, expression.args.get("position")
    +468    )
     
    @@ -2236,13 +2174,13 @@ columns are removed from the create statement.

    -
    478def left_to_substring_sql(self: Generator, expression: exp.Left) -> str:
    -479    expression = expression.copy()
    -480    return self.sql(
    -481        exp.Substring(
    -482            this=expression.this, start=exp.Literal.number(1), length=expression.expression
    -483        )
    -484    )
    +            
    471def left_to_substring_sql(self: Generator, expression: exp.Left) -> str:
    +472    expression = expression.copy()
    +473    return self.sql(
    +474        exp.Substring(
    +475            this=expression.this, start=exp.Literal.number(1), length=expression.expression
    +476        )
    +477    )
     
    @@ -2260,14 +2198,14 @@ columns are removed from the create statement.

    -
    487def right_to_substring_sql(self: Generator, expression: exp.Left) -> str:
    -488    expression = expression.copy()
    -489    return self.sql(
    -490        exp.Substring(
    -491            this=expression.this,
    -492            start=exp.Length(this=expression.this) - exp.paren(expression.expression - 1),
    -493        )
    -494    )
    +            
    480def right_to_substring_sql(self: Generator, expression: exp.Left) -> str:
    +481    expression = expression.copy()
    +482    return self.sql(
    +483        exp.Substring(
    +484            this=expression.this,
    +485            start=exp.Length(this=expression.this) - exp.paren(expression.expression - 1),
    +486        )
    +487    )
     
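These two helpers rewrite LEFT/RIGHT into SUBSTRING for engines that lack them. A sketch of the node left_to_substring_sql builds, rendered with the default generator; the column and length are illustrative and the exact formatting may vary by dialect.

    from sqlglot import exp

    # LEFT(s, 3) becomes SUBSTRING(s, 1, 3).
    node = exp.Substring(
        this=exp.column("s"), start=exp.Literal.number(1), length=exp.Literal.number(3)
    )
    print(node.sql())  # roughly: SUBSTRING(s, 1, 3)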
    @@ -2285,8 +2223,8 @@ columns are removed from the create statement.

    -
    497def timestrtotime_sql(self: Generator, expression: exp.TimeStrToTime) -> str:
    -498    return f"CAST({self.sql(expression, 'this')} AS TIMESTAMP)"
    +            
    490def timestrtotime_sql(self: Generator, expression: exp.TimeStrToTime) -> str:
    +491    return f"CAST({self.sql(expression, 'this')} AS TIMESTAMP)"
     
    @@ -2304,8 +2242,8 @@ columns are removed from the create statement.

    -
    501def datestrtodate_sql(self: Generator, expression: exp.DateStrToDate) -> str:
    -502    return f"CAST({self.sql(expression, 'this')} AS DATE)"
    +            
    494def datestrtodate_sql(self: Generator, expression: exp.DateStrToDate) -> str:
    +495    return f"CAST({self.sql(expression, 'this')} AS DATE)"
     
    @@ -2323,9 +2261,9 @@ columns are removed from the create statement.

    -
    505def min_or_least(self: Generator, expression: exp.Min) -> str:
    -506    name = "LEAST" if expression.expressions else "MIN"
    -507    return rename_func(name)(self, expression)
    +            
    498def min_or_least(self: Generator, expression: exp.Min) -> str:
    +499    name = "LEAST" if expression.expressions else "MIN"
    +500    return rename_func(name)(self, expression)
     
    @@ -2343,9 +2281,9 @@ columns are removed from the create statement.

    -
    510def max_or_greatest(self: Generator, expression: exp.Max) -> str:
    -511    name = "GREATEST" if expression.expressions else "MAX"
    -512    return rename_func(name)(self, expression)
    +            
    503def max_or_greatest(self: Generator, expression: exp.Max) -> str:
    +504    name = "GREATEST" if expression.expressions else "MAX"
    +505    return rename_func(name)(self, expression)
     
    @@ -2363,14 +2301,14 @@ columns are removed from the create statement.

    -
    515def count_if_to_sum(self: Generator, expression: exp.CountIf) -> str:
    -516    cond = expression.this
    -517
    -518    if isinstance(expression.this, exp.Distinct):
    -519        cond = expression.this.expressions[0]
    -520        self.unsupported("DISTINCT is not supported when converting COUNT_IF to SUM")
    -521
    -522    return self.func("sum", exp.func("if", cond, 1, 0))
    +            
    508def count_if_to_sum(self: Generator, expression: exp.CountIf) -> str:
    +509    cond = expression.this
    +510
    +511    if isinstance(expression.this, exp.Distinct):
    +512        cond = expression.this.expressions[0]
    +513        self.unsupported("DISTINCT is not supported when converting COUNT_IF to SUM")
    +514
    +515    return self.func("sum", exp.func("if", cond, 1, 0))
     
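count_if_to_sum rewrites COUNT_IF(cond) as SUM(IF(cond, 1, 0)) for dialects without COUNT_IF. A small sketch that builds the same node directly; the condition is illustrative.

    from sqlglot import exp

    cond = exp.condition("x > 0")  # illustrative condition
    rewritten = exp.func("sum", exp.func("if", cond, 1, 0))
    print(rewritten.sql())  # roughly: SUM(IF(x > 0, 1, 0))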
    @@ -2388,21 +2326,21 @@ columns are removed from the create statement.

    -
    525def trim_sql(self: Generator, expression: exp.Trim) -> str:
    -526    target = self.sql(expression, "this")
    -527    trim_type = self.sql(expression, "position")
    -528    remove_chars = self.sql(expression, "expression")
    -529    collation = self.sql(expression, "collation")
    -530
    -531    # Use TRIM/LTRIM/RTRIM syntax if the expression isn't database-specific
    -532    if not remove_chars and not collation:
    -533        return self.trim_sql(expression)
    -534
    -535    trim_type = f"{trim_type} " if trim_type else ""
    -536    remove_chars = f"{remove_chars} " if remove_chars else ""
    -537    from_part = "FROM " if trim_type or remove_chars else ""
    -538    collation = f" COLLATE {collation}" if collation else ""
    -539    return f"TRIM({trim_type}{remove_chars}{from_part}{target}{collation})"
    +            
    518def trim_sql(self: Generator, expression: exp.Trim) -> str:
    +519    target = self.sql(expression, "this")
    +520    trim_type = self.sql(expression, "position")
    +521    remove_chars = self.sql(expression, "expression")
    +522    collation = self.sql(expression, "collation")
    +523
    +524    # Use TRIM/LTRIM/RTRIM syntax if the expression isn't database-specific
    +525    if not remove_chars and not collation:
    +526        return self.trim_sql(expression)
    +527
    +528    trim_type = f"{trim_type} " if trim_type else ""
    +529    remove_chars = f"{remove_chars} " if remove_chars else ""
    +530    from_part = "FROM " if trim_type or remove_chars else ""
    +531    collation = f" COLLATE {collation}" if collation else ""
    +532    return f"TRIM({trim_type}{remove_chars}{from_part}{target}{collation})"
     
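When custom trim characters or a collation are present, trim_sql assembles the verbose TRIM(... FROM ...) form instead of delegating to the plain TRIM/LTRIM/RTRIM path. A plain-Python sketch of that string assembly with illustrative values.

    target = "col"
    trim_type = "LEADING"   # from the expression's "position" arg
    remove_chars = "'x'"    # from the expression's "expression" arg
    collation = ""

    trim_type = f"{trim_type} " if trim_type else ""
    remove_chars = f"{remove_chars} " if remove_chars else ""
    from_part = "FROM " if trim_type or remove_chars else ""
    collation = f" COLLATE {collation}" if collation else ""
    print(f"TRIM({trim_type}{remove_chars}{from_part}{target}{collation})")
    # TRIM(LEADING 'x' FROM col)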
    @@ -2420,8 +2358,8 @@ columns are removed from the create statement.

    -
    542def str_to_time_sql(self: Generator, expression: exp.Expression) -> str:
    -543    return self.func("STRPTIME", expression.this, self.format_time(expression))
    +            
    535def str_to_time_sql(self: Generator, expression: exp.Expression) -> str:
    +536    return self.func("STRPTIME", expression.this, self.format_time(expression))
     
    @@ -2439,15 +2377,38 @@ columns are removed from the create statement.

    -
    546def ts_or_ds_to_date_sql(dialect: str) -> t.Callable:
    -547    def _ts_or_ds_to_date_sql(self: Generator, expression: exp.TsOrDsToDate) -> str:
    -548        _dialect = Dialect.get_or_raise(dialect)
    -549        time_format = self.format_time(expression)
    -550        if time_format and time_format not in (_dialect.time_format, _dialect.date_format):
    -551            return f"CAST({str_to_time_sql(self, expression)} AS DATE)"
    -552        return f"CAST({self.sql(expression, 'this')} AS DATE)"
    -553
    -554    return _ts_or_ds_to_date_sql
    +            
    539def ts_or_ds_to_date_sql(dialect: str) -> t.Callable:
    +540    def _ts_or_ds_to_date_sql(self: Generator, expression: exp.TsOrDsToDate) -> str:
    +541        _dialect = Dialect.get_or_raise(dialect)
    +542        time_format = self.format_time(expression)
    +543        if time_format and time_format not in (_dialect.TIME_FORMAT, _dialect.DATE_FORMAT):
    +544            return f"CAST({str_to_time_sql(self, expression)} AS DATE)"
    +545        return f"CAST({self.sql(expression, 'this')} AS DATE)"
    +546
    +547    return _ts_or_ds_to_date_sql
+def concat_to_dpipe_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.Concat | sqlglot.expressions.SafeConcat) -> str:
    550def concat_to_dpipe_sql(self: Generator, expression: exp.Concat | exp.SafeConcat) -> str:
    +551    this, *rest_args = expression.expressions
    +552    for arg in rest_args:
    +553        this = exp.DPipe(this=this, expression=arg)
    +554
    +555    return self.sql(this)
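A small sketch mirroring the fold above with sqlglot's public expression API (exact output formatting may vary by dialect and version):

    import sqlglot
    from sqlglot import exp

    concat = sqlglot.parse_one("CONCAT(a, b, c)")
    this, *rest_args = concat.expressions
    for arg in rest_args:
        this = exp.DPipe(this=this, expression=arg)
    print(this.sql())  # roughly: a || b || c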
     
    @@ -2465,26 +2426,26 @@ columns are removed from the create statement.

    -
    558def pivot_column_names(aggregations: t.List[exp.Expression], dialect: DialectType) -> t.List[str]:
    -559    names = []
    -560    for agg in aggregations:
    -561        if isinstance(agg, exp.Alias):
    -562            names.append(agg.alias)
    -563        else:
    -564            """
    -565            This case corresponds to aggregations without aliases being used as suffixes
    -566            (e.g. col_avg(foo)). We need to unquote identifiers because they're going to
    -567            be quoted in the base parser's `_parse_pivot` method, due to `to_identifier`.
    -568            Otherwise, we'd end up with `col_avg(`foo`)` (notice the double quotes).
    -569            """
    -570            agg_all_unquoted = agg.transform(
    -571                lambda node: exp.Identifier(this=node.name, quoted=False)
    -572                if isinstance(node, exp.Identifier)
    -573                else node
    -574            )
    -575            names.append(agg_all_unquoted.sql(dialect=dialect, normalize_functions="lower"))
    -576
    -577    return names
    +            
    559def pivot_column_names(aggregations: t.List[exp.Expression], dialect: DialectType) -> t.List[str]:
    +560    names = []
    +561    for agg in aggregations:
    +562        if isinstance(agg, exp.Alias):
    +563            names.append(agg.alias)
    +564        else:
    +565            """
    +566            This case corresponds to aggregations without aliases being used as suffixes
    +567            (e.g. col_avg(foo)). We need to unquote identifiers because they're going to
    +568            be quoted in the base parser's `_parse_pivot` method, due to `to_identifier`.
    +569            Otherwise, we'd end up with `col_avg(`foo`)` (notice the double quotes).
    +570            """
    +571            agg_all_unquoted = agg.transform(
    +572                lambda node: exp.Identifier(this=node.name, quoted=False)
    +573                if isinstance(node, exp.Identifier)
    +574                else node
    +575            )
    +576            names.append(agg_all_unquoted.sql(dialect=dialect, normalize_functions="lower"))
    +577
    +578    return names
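A hedged illustration of the unquoting transform used above, applied to a standalone aggregation:

    import sqlglot
    from sqlglot import exp

    agg = sqlglot.parse_one('AVG("foo")', read="duckdb")  # quoted identifier
    unquoted = agg.transform(
        lambda node: exp.Identifier(this=node.name, quoted=False)
        if isinstance(node, exp.Identifier)
        else node
    )
    print(unquoted.sql(dialect="duckdb", normalize_functions="lower"))
    # roughly: avg(foo), which is then usable as a pivot column suffix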
     
    diff --git a/docs/sqlglot/dialects/drill.html b/docs/sqlglot/dialects/drill.html index 342c5f1..dc9d709 100644 --- a/docs/sqlglot/dialects/drill.html +++ b/docs/sqlglot/dialects/drill.html @@ -98,151 +98,141 @@
    16) 17 18 - 19def _str_to_time_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str: - 20 return f"STRPTIME({self.sql(expression, 'this')}, {self.format_time(expression)})" - 21 - 22 - 23def _ts_or_ds_to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str: - 24 time_format = self.format_time(expression) - 25 if time_format and time_format not in (Drill.time_format, Drill.date_format): - 26 return f"CAST({_str_to_time_sql(self, expression)} AS DATE)" - 27 return f"CAST({self.sql(expression, 'this')} AS DATE)" + 19def _date_add_sql(kind: str) -> t.Callable[[generator.Generator, exp.DateAdd | exp.DateSub], str]: + 20 def func(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str: + 21 this = self.sql(expression, "this") + 22 unit = exp.var(expression.text("unit").upper() or "DAY") + 23 return ( + 24 f"DATE_{kind}({this}, {self.sql(exp.Interval(this=expression.expression, unit=unit))})" + 25 ) + 26 + 27 return func 28 29 - 30def _date_add_sql(kind: str) -> t.Callable[[generator.Generator, exp.DateAdd | exp.DateSub], str]: - 31 def func(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str: - 32 this = self.sql(expression, "this") - 33 unit = exp.Var(this=expression.text("unit").upper() or "DAY") - 34 return ( - 35 f"DATE_{kind}({this}, {self.sql(exp.Interval(this=expression.expression, unit=unit))})" - 36 ) + 30def _str_to_date(self: generator.Generator, expression: exp.StrToDate) -> str: + 31 this = self.sql(expression, "this") + 32 time_format = self.format_time(expression) + 33 if time_format == Drill.DATE_FORMAT: + 34 return f"CAST({this} AS DATE)" + 35 return f"TO_DATE({this}, {time_format})" + 36 37 - 38 return func - 39 - 40 - 41def _str_to_date(self: generator.Generator, expression: exp.StrToDate) -> str: - 42 this = self.sql(expression, "this") - 43 time_format = self.format_time(expression) - 44 if time_format == Drill.date_format: - 45 return f"CAST({this} AS DATE)" - 46 return f"TO_DATE({this}, {time_format})" - 47 - 48 - 49class Drill(Dialect): - 50 normalize_functions = None - 51 null_ordering = "nulls_are_last" - 52 date_format = "'yyyy-MM-dd'" - 53 dateint_format = "'yyyyMMdd'" - 54 time_format = "'yyyy-MM-dd HH:mm:ss'" - 55 - 56 time_mapping = { - 57 "y": "%Y", - 58 "Y": "%Y", - 59 "YYYY": "%Y", - 60 "yyyy": "%Y", - 61 "YY": "%y", - 62 "yy": "%y", - 63 "MMMM": "%B", - 64 "MMM": "%b", - 65 "MM": "%m", - 66 "M": "%-m", - 67 "dd": "%d", - 68 "d": "%-d", - 69 "HH": "%H", - 70 "H": "%-H", - 71 "hh": "%I", - 72 "h": "%-I", - 73 "mm": "%M", - 74 "m": "%-M", - 75 "ss": "%S", - 76 "s": "%-S", - 77 "SSSSSS": "%f", - 78 "a": "%p", - 79 "DD": "%j", - 80 "D": "%-j", - 81 "E": "%a", - 82 "EE": "%a", - 83 "EEE": "%a", - 84 "EEEE": "%A", - 85 "''T''": "T", - 86 } - 87 - 88 class Tokenizer(tokens.Tokenizer): - 89 QUOTES = ["'"] - 90 IDENTIFIERS = ["`"] - 91 STRING_ESCAPES = ["\\"] - 92 ENCODE = "utf-8" + 38class Drill(Dialect): + 39 NORMALIZE_FUNCTIONS: bool | str = False + 40 NULL_ORDERING = "nulls_are_last" + 41 DATE_FORMAT = "'yyyy-MM-dd'" + 42 DATEINT_FORMAT = "'yyyyMMdd'" + 43 TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'" + 44 + 45 TIME_MAPPING = { + 46 "y": "%Y", + 47 "Y": "%Y", + 48 "YYYY": "%Y", + 49 "yyyy": "%Y", + 50 "YY": "%y", + 51 "yy": "%y", + 52 "MMMM": "%B", + 53 "MMM": "%b", + 54 "MM": "%m", + 55 "M": "%-m", + 56 "dd": "%d", + 57 "d": "%-d", + 58 "HH": "%H", + 59 "H": "%-H", + 60 "hh": "%I", + 61 "h": "%-I", + 62 "mm": "%M", + 63 "m": "%-M", + 64 "ss": "%S", + 65 "s": "%-S", + 66 "SSSSSS": "%f", + 67 "a": 
"%p", + 68 "DD": "%j", + 69 "D": "%-j", + 70 "E": "%a", + 71 "EE": "%a", + 72 "EEE": "%a", + 73 "EEEE": "%A", + 74 "''T''": "T", + 75 } + 76 + 77 class Tokenizer(tokens.Tokenizer): + 78 QUOTES = ["'"] + 79 IDENTIFIERS = ["`"] + 80 STRING_ESCAPES = ["\\"] + 81 ENCODE = "utf-8" + 82 + 83 class Parser(parser.Parser): + 84 STRICT_CAST = False + 85 CONCAT_NULL_OUTPUTS_STRING = True + 86 + 87 FUNCTIONS = { + 88 **parser.Parser.FUNCTIONS, + 89 "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "drill"), + 90 "TO_TIMESTAMP": exp.TimeStrToTime.from_arg_list, + 91 "TO_CHAR": format_time_lambda(exp.TimeToStr, "drill"), + 92 } 93 - 94 class Parser(parser.Parser): - 95 STRICT_CAST = False - 96 - 97 FUNCTIONS = { - 98 **parser.Parser.FUNCTIONS, - 99 "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "drill"), -100 "TO_TIMESTAMP": exp.TimeStrToTime.from_arg_list, -101 "TO_CHAR": format_time_lambda(exp.TimeToStr, "drill"), -102 } -103 -104 LOG_DEFAULTS_TO_LN = True -105 -106 class Generator(generator.Generator): -107 JOIN_HINTS = False -108 TABLE_HINTS = False -109 -110 TYPE_MAPPING = { -111 **generator.Generator.TYPE_MAPPING, -112 exp.DataType.Type.INT: "INTEGER", -113 exp.DataType.Type.SMALLINT: "INTEGER", -114 exp.DataType.Type.TINYINT: "INTEGER", -115 exp.DataType.Type.BINARY: "VARBINARY", -116 exp.DataType.Type.TEXT: "VARCHAR", -117 exp.DataType.Type.NCHAR: "VARCHAR", -118 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", -119 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -120 exp.DataType.Type.DATETIME: "TIMESTAMP", -121 } -122 -123 PROPERTIES_LOCATION = { -124 **generator.Generator.PROPERTIES_LOCATION, -125 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, -126 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -127 } -128 -129 TRANSFORMS = { -130 **generator.Generator.TRANSFORMS, -131 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", -132 exp.ArrayContains: rename_func("REPEATED_CONTAINS"), -133 exp.ArraySize: rename_func("REPEATED_COUNT"), -134 exp.Create: create_with_partitions_sql, -135 exp.DateAdd: _date_add_sql("ADD"), -136 exp.DateStrToDate: datestrtodate_sql, -137 exp.DateSub: _date_add_sql("SUB"), -138 exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.dateint_format}) AS INT)", -139 exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS VARCHAR), {Drill.dateint_format})", -140 exp.If: lambda self, e: f"`IF`({self.format_args(e.this, e.args.get('true'), e.args.get('false'))})", -141 exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}", -142 exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"), -143 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", -144 exp.RegexpLike: rename_func("REGEXP_MATCHES"), -145 exp.StrPosition: str_position_sql, -146 exp.StrToDate: _str_to_date, -147 exp.Pow: rename_func("POW"), -148 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), -149 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", -150 exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)", -151 exp.TimeStrToTime: timestrtotime_sql, -152 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), -153 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", -154 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), -155 exp.ToChar: lambda self, e: self.function_fallback_sql(e), -156 exp.TryCast: no_trycast_sql, -157 exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS 
DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.Var(this='DAY')))})", -158 exp.TsOrDsToDate: ts_or_ds_to_date_sql("drill"), -159 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)", -160 } -161 -162 def normalize_func(self, name: str) -> str: -163 return name if exp.SAFE_IDENTIFIER_RE.match(name) else f"`{name}`" + 94 LOG_DEFAULTS_TO_LN = True + 95 + 96 class Generator(generator.Generator): + 97 JOIN_HINTS = False + 98 TABLE_HINTS = False + 99 +100 TYPE_MAPPING = { +101 **generator.Generator.TYPE_MAPPING, +102 exp.DataType.Type.INT: "INTEGER", +103 exp.DataType.Type.SMALLINT: "INTEGER", +104 exp.DataType.Type.TINYINT: "INTEGER", +105 exp.DataType.Type.BINARY: "VARBINARY", +106 exp.DataType.Type.TEXT: "VARCHAR", +107 exp.DataType.Type.NCHAR: "VARCHAR", +108 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", +109 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +110 exp.DataType.Type.DATETIME: "TIMESTAMP", +111 } +112 +113 PROPERTIES_LOCATION = { +114 **generator.Generator.PROPERTIES_LOCATION, +115 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, +116 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +117 } +118 +119 TRANSFORMS = { +120 **generator.Generator.TRANSFORMS, +121 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", +122 exp.ArrayContains: rename_func("REPEATED_CONTAINS"), +123 exp.ArraySize: rename_func("REPEATED_COUNT"), +124 exp.Create: create_with_partitions_sql, +125 exp.DateAdd: _date_add_sql("ADD"), +126 exp.DateStrToDate: datestrtodate_sql, +127 exp.DateSub: _date_add_sql("SUB"), +128 exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.DATEINT_FORMAT}) AS INT)", +129 exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS VARCHAR), {Drill.DATEINT_FORMAT})", +130 exp.If: lambda self, e: f"`IF`({self.format_args(e.this, e.args.get('true'), e.args.get('false'))})", +131 exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}", +132 exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"), +133 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", +134 exp.RegexpLike: rename_func("REGEXP_MATCHES"), +135 exp.StrPosition: str_position_sql, +136 exp.StrToDate: _str_to_date, +137 exp.Pow: rename_func("POW"), +138 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), +139 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", +140 exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)", +141 exp.TimeStrToTime: timestrtotime_sql, +142 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), +143 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", +144 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), +145 exp.ToChar: lambda self, e: self.function_fallback_sql(e), +146 exp.TryCast: no_trycast_sql, +147 exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.var('DAY')))})", +148 exp.TsOrDsToDate: ts_or_ds_to_date_sql("drill"), +149 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)", +150 } +151 +152 def normalize_func(self, name: str) -> str: +153 return name if exp.SAFE_IDENTIFIER_RE.match(name) else f"`{name}`"
    @@ -258,121 +248,122 @@
    -
     50class Drill(Dialect):
    - 51    normalize_functions = None
    - 52    null_ordering = "nulls_are_last"
    - 53    date_format = "'yyyy-MM-dd'"
    - 54    dateint_format = "'yyyyMMdd'"
    - 55    time_format = "'yyyy-MM-dd HH:mm:ss'"
    - 56
    - 57    time_mapping = {
    - 58        "y": "%Y",
    - 59        "Y": "%Y",
    - 60        "YYYY": "%Y",
    - 61        "yyyy": "%Y",
    - 62        "YY": "%y",
    - 63        "yy": "%y",
    - 64        "MMMM": "%B",
    - 65        "MMM": "%b",
    - 66        "MM": "%m",
    - 67        "M": "%-m",
    - 68        "dd": "%d",
    - 69        "d": "%-d",
    - 70        "HH": "%H",
    - 71        "H": "%-H",
    - 72        "hh": "%I",
    - 73        "h": "%-I",
    - 74        "mm": "%M",
    - 75        "m": "%-M",
    - 76        "ss": "%S",
    - 77        "s": "%-S",
    - 78        "SSSSSS": "%f",
    - 79        "a": "%p",
    - 80        "DD": "%j",
    - 81        "D": "%-j",
    - 82        "E": "%a",
    - 83        "EE": "%a",
    - 84        "EEE": "%a",
    - 85        "EEEE": "%A",
    - 86        "''T''": "T",
    - 87    }
    - 88
    - 89    class Tokenizer(tokens.Tokenizer):
    - 90        QUOTES = ["'"]
    - 91        IDENTIFIERS = ["`"]
    - 92        STRING_ESCAPES = ["\\"]
    - 93        ENCODE = "utf-8"
    +            
     39class Drill(Dialect):
    + 40    NORMALIZE_FUNCTIONS: bool | str = False
    + 41    NULL_ORDERING = "nulls_are_last"
    + 42    DATE_FORMAT = "'yyyy-MM-dd'"
    + 43    DATEINT_FORMAT = "'yyyyMMdd'"
    + 44    TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'"
    + 45
    + 46    TIME_MAPPING = {
    + 47        "y": "%Y",
    + 48        "Y": "%Y",
    + 49        "YYYY": "%Y",
    + 50        "yyyy": "%Y",
    + 51        "YY": "%y",
    + 52        "yy": "%y",
    + 53        "MMMM": "%B",
    + 54        "MMM": "%b",
    + 55        "MM": "%m",
    + 56        "M": "%-m",
    + 57        "dd": "%d",
    + 58        "d": "%-d",
    + 59        "HH": "%H",
    + 60        "H": "%-H",
    + 61        "hh": "%I",
    + 62        "h": "%-I",
    + 63        "mm": "%M",
    + 64        "m": "%-M",
    + 65        "ss": "%S",
    + 66        "s": "%-S",
    + 67        "SSSSSS": "%f",
    + 68        "a": "%p",
    + 69        "DD": "%j",
    + 70        "D": "%-j",
    + 71        "E": "%a",
    + 72        "EE": "%a",
    + 73        "EEE": "%a",
    + 74        "EEEE": "%A",
    + 75        "''T''": "T",
    + 76    }
    + 77
    + 78    class Tokenizer(tokens.Tokenizer):
    + 79        QUOTES = ["'"]
    + 80        IDENTIFIERS = ["`"]
    + 81        STRING_ESCAPES = ["\\"]
    + 82        ENCODE = "utf-8"
    + 83
    + 84    class Parser(parser.Parser):
    + 85        STRICT_CAST = False
    + 86        CONCAT_NULL_OUTPUTS_STRING = True
    + 87
    + 88        FUNCTIONS = {
    + 89            **parser.Parser.FUNCTIONS,
    + 90            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "drill"),
    + 91            "TO_TIMESTAMP": exp.TimeStrToTime.from_arg_list,
    + 92            "TO_CHAR": format_time_lambda(exp.TimeToStr, "drill"),
    + 93        }
      94
    - 95    class Parser(parser.Parser):
    - 96        STRICT_CAST = False
    - 97
    - 98        FUNCTIONS = {
    - 99            **parser.Parser.FUNCTIONS,
    -100            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "drill"),
    -101            "TO_TIMESTAMP": exp.TimeStrToTime.from_arg_list,
    -102            "TO_CHAR": format_time_lambda(exp.TimeToStr, "drill"),
    -103        }
    -104
    -105        LOG_DEFAULTS_TO_LN = True
    -106
    -107    class Generator(generator.Generator):
    -108        JOIN_HINTS = False
    -109        TABLE_HINTS = False
    -110
    -111        TYPE_MAPPING = {
    -112            **generator.Generator.TYPE_MAPPING,
    -113            exp.DataType.Type.INT: "INTEGER",
    -114            exp.DataType.Type.SMALLINT: "INTEGER",
    -115            exp.DataType.Type.TINYINT: "INTEGER",
    -116            exp.DataType.Type.BINARY: "VARBINARY",
    -117            exp.DataType.Type.TEXT: "VARCHAR",
    -118            exp.DataType.Type.NCHAR: "VARCHAR",
    -119            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
    -120            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -121            exp.DataType.Type.DATETIME: "TIMESTAMP",
    -122        }
    -123
    -124        PROPERTIES_LOCATION = {
    -125            **generator.Generator.PROPERTIES_LOCATION,
    -126            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    -127            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -128        }
    -129
    -130        TRANSFORMS = {
    -131            **generator.Generator.TRANSFORMS,
    -132            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    -133            exp.ArrayContains: rename_func("REPEATED_CONTAINS"),
    -134            exp.ArraySize: rename_func("REPEATED_COUNT"),
    -135            exp.Create: create_with_partitions_sql,
    -136            exp.DateAdd: _date_add_sql("ADD"),
    -137            exp.DateStrToDate: datestrtodate_sql,
    -138            exp.DateSub: _date_add_sql("SUB"),
    -139            exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.dateint_format}) AS INT)",
    -140            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS VARCHAR), {Drill.dateint_format})",
    -141            exp.If: lambda self, e: f"`IF`({self.format_args(e.this, e.args.get('true'), e.args.get('false'))})",
    -142            exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}",
    -143            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
    -144            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    -145            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
    -146            exp.StrPosition: str_position_sql,
    -147            exp.StrToDate: _str_to_date,
    -148            exp.Pow: rename_func("POW"),
    -149            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -150            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    -151            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
    -152            exp.TimeStrToTime: timestrtotime_sql,
    -153            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    -154            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    -155            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
    -156            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -157            exp.TryCast: no_trycast_sql,
    -158            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.Var(this='DAY')))})",
    -159            exp.TsOrDsToDate: ts_or_ds_to_date_sql("drill"),
    -160            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
    -161        }
    -162
    -163        def normalize_func(self, name: str) -> str:
    -164            return name if exp.SAFE_IDENTIFIER_RE.match(name) else f"`{name}`"
    + 95        LOG_DEFAULTS_TO_LN = True
    + 96
    + 97    class Generator(generator.Generator):
    + 98        JOIN_HINTS = False
    + 99        TABLE_HINTS = False
    +100
    +101        TYPE_MAPPING = {
    +102            **generator.Generator.TYPE_MAPPING,
    +103            exp.DataType.Type.INT: "INTEGER",
    +104            exp.DataType.Type.SMALLINT: "INTEGER",
    +105            exp.DataType.Type.TINYINT: "INTEGER",
    +106            exp.DataType.Type.BINARY: "VARBINARY",
    +107            exp.DataType.Type.TEXT: "VARCHAR",
    +108            exp.DataType.Type.NCHAR: "VARCHAR",
    +109            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
    +110            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +111            exp.DataType.Type.DATETIME: "TIMESTAMP",
    +112        }
    +113
    +114        PROPERTIES_LOCATION = {
    +115            **generator.Generator.PROPERTIES_LOCATION,
    +116            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    +117            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +118        }
    +119
    +120        TRANSFORMS = {
    +121            **generator.Generator.TRANSFORMS,
    +122            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    +123            exp.ArrayContains: rename_func("REPEATED_CONTAINS"),
    +124            exp.ArraySize: rename_func("REPEATED_COUNT"),
    +125            exp.Create: create_with_partitions_sql,
    +126            exp.DateAdd: _date_add_sql("ADD"),
    +127            exp.DateStrToDate: datestrtodate_sql,
    +128            exp.DateSub: _date_add_sql("SUB"),
    +129            exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.DATEINT_FORMAT}) AS INT)",
    +130            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS VARCHAR), {Drill.DATEINT_FORMAT})",
    +131            exp.If: lambda self, e: f"`IF`({self.format_args(e.this, e.args.get('true'), e.args.get('false'))})",
    +132            exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}",
    +133            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
    +134            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    +135            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
    +136            exp.StrPosition: str_position_sql,
    +137            exp.StrToDate: _str_to_date,
    +138            exp.Pow: rename_func("POW"),
    +139            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +140            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    +141            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
    +142            exp.TimeStrToTime: timestrtotime_sql,
    +143            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    +144            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    +145            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
    +146            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +147            exp.TryCast: no_trycast_sql,
    +148            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.var('DAY')))})",
    +149            exp.TsOrDsToDate: ts_or_ds_to_date_sql("drill"),
    +150            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
    +151        }
    +152
    +153        def normalize_func(self, name: str) -> str:
    +154            return name if exp.SAFE_IDENTIFIER_RE.match(name) else f"`{name}`"
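A small usage sketch of the renamed dialect settings shown in this listing (values taken directly from it):

    from sqlglot.dialects.drill import Drill

    # Dialect-level settings are now uppercase class attributes.
    print(Drill.DATE_FORMAT)           # "'yyyy-MM-dd'"
    print(Drill.NULL_ORDERING)         # "nulls_are_last"
    print(Drill.TIME_MAPPING["yyyy"])  # "%Y"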
     
    @@ -407,11 +398,11 @@
    -
    89    class Tokenizer(tokens.Tokenizer):
    -90        QUOTES = ["'"]
    -91        IDENTIFIERS = ["`"]
    -92        STRING_ESCAPES = ["\\"]
    -93        ENCODE = "utf-8"
    +            
    78    class Tokenizer(tokens.Tokenizer):
    +79        QUOTES = ["'"]
    +80        IDENTIFIERS = ["`"]
    +81        STRING_ESCAPES = ["\\"]
    +82        ENCODE = "utf-8"
     
    @@ -423,6 +414,7 @@ @@ -439,41 +431,34 @@
    -
     95    class Parser(parser.Parser):
    - 96        STRICT_CAST = False
    - 97
    - 98        FUNCTIONS = {
    - 99            **parser.Parser.FUNCTIONS,
    -100            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "drill"),
    -101            "TO_TIMESTAMP": exp.TimeStrToTime.from_arg_list,
    -102            "TO_CHAR": format_time_lambda(exp.TimeToStr, "drill"),
    -103        }
    -104
    -105        LOG_DEFAULTS_TO_LN = True
    +            
    84    class Parser(parser.Parser):
    +85        STRICT_CAST = False
    +86        CONCAT_NULL_OUTPUTS_STRING = True
    +87
    +88        FUNCTIONS = {
    +89            **parser.Parser.FUNCTIONS,
    +90            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "drill"),
    +91            "TO_TIMESTAMP": exp.TimeStrToTime.from_arg_list,
    +92            "TO_CHAR": format_time_lambda(exp.TimeToStr, "drill"),
    +93        }
    +94
    +95        LOG_DEFAULTS_TO_LN = True
     
-Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces
-a parsed syntax tree.
+Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
-  • error_level: the desired error level.
+  • error_level: The desired error level.
       Default: ErrorLevel.IMMEDIATE
-  • error_message_context: determines the amount of context to capture from a
+  • error_message_context: Determines the amount of context to capture from a
       query string when displaying the error message (in number of characters).
-      Default: 50.
+      Default: 100
-  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.
-      Default: 0
-  • alias_post_tablesample: If the table alias comes after tablesample.
-      Default: False
   • max_errors: Maximum number of error messages to include in a raised ParseError.
       This is only relevant if error_level is ErrorLevel.RAISE.
       Default: 3
-  • null_ordering: Indicates the default null ordering method to use if not explicitly set.
-      Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
-      Default: "nulls_are_small"
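A hedged sketch of passing these parser options through the top-level API (the SQL below is assumed to be invalid enough to produce an error):

    import sqlglot
    from sqlglot.errors import ErrorLevel, ParseError

    try:
        sqlglot.parse_one("SELECT * FROM", error_level=ErrorLevel.RAISE, max_errors=3)
    except ParseError as exc:
        print(exc)  # up to max_errors messages, each with ~100 characters of query context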
    @@ -506,110 +491,95 @@ Default: "nulls_are_small"
    -
    107    class Generator(generator.Generator):
    -108        JOIN_HINTS = False
    -109        TABLE_HINTS = False
    -110
    -111        TYPE_MAPPING = {
    -112            **generator.Generator.TYPE_MAPPING,
    -113            exp.DataType.Type.INT: "INTEGER",
    -114            exp.DataType.Type.SMALLINT: "INTEGER",
    -115            exp.DataType.Type.TINYINT: "INTEGER",
    -116            exp.DataType.Type.BINARY: "VARBINARY",
    -117            exp.DataType.Type.TEXT: "VARCHAR",
    -118            exp.DataType.Type.NCHAR: "VARCHAR",
    -119            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
    -120            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -121            exp.DataType.Type.DATETIME: "TIMESTAMP",
    -122        }
    -123
    -124        PROPERTIES_LOCATION = {
    -125            **generator.Generator.PROPERTIES_LOCATION,
    -126            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    -127            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -128        }
    -129
    -130        TRANSFORMS = {
    -131            **generator.Generator.TRANSFORMS,
    -132            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    -133            exp.ArrayContains: rename_func("REPEATED_CONTAINS"),
    -134            exp.ArraySize: rename_func("REPEATED_COUNT"),
    -135            exp.Create: create_with_partitions_sql,
    -136            exp.DateAdd: _date_add_sql("ADD"),
    -137            exp.DateStrToDate: datestrtodate_sql,
    -138            exp.DateSub: _date_add_sql("SUB"),
    -139            exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.dateint_format}) AS INT)",
    -140            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS VARCHAR), {Drill.dateint_format})",
    -141            exp.If: lambda self, e: f"`IF`({self.format_args(e.this, e.args.get('true'), e.args.get('false'))})",
    -142            exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}",
    -143            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
    -144            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    -145            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
    -146            exp.StrPosition: str_position_sql,
    -147            exp.StrToDate: _str_to_date,
    -148            exp.Pow: rename_func("POW"),
    -149            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -150            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    -151            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
    -152            exp.TimeStrToTime: timestrtotime_sql,
    -153            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    -154            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    -155            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
    -156            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -157            exp.TryCast: no_trycast_sql,
    -158            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.Var(this='DAY')))})",
    -159            exp.TsOrDsToDate: ts_or_ds_to_date_sql("drill"),
    -160            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
    -161        }
    -162
    -163        def normalize_func(self, name: str) -> str:
    -164            return name if exp.SAFE_IDENTIFIER_RE.match(name) else f"`{name}`"
    +            
     97    class Generator(generator.Generator):
    + 98        JOIN_HINTS = False
    + 99        TABLE_HINTS = False
    +100
    +101        TYPE_MAPPING = {
    +102            **generator.Generator.TYPE_MAPPING,
    +103            exp.DataType.Type.INT: "INTEGER",
    +104            exp.DataType.Type.SMALLINT: "INTEGER",
    +105            exp.DataType.Type.TINYINT: "INTEGER",
    +106            exp.DataType.Type.BINARY: "VARBINARY",
    +107            exp.DataType.Type.TEXT: "VARCHAR",
    +108            exp.DataType.Type.NCHAR: "VARCHAR",
    +109            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
    +110            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +111            exp.DataType.Type.DATETIME: "TIMESTAMP",
    +112        }
    +113
    +114        PROPERTIES_LOCATION = {
    +115            **generator.Generator.PROPERTIES_LOCATION,
    +116            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    +117            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +118        }
    +119
    +120        TRANSFORMS = {
    +121            **generator.Generator.TRANSFORMS,
    +122            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    +123            exp.ArrayContains: rename_func("REPEATED_CONTAINS"),
    +124            exp.ArraySize: rename_func("REPEATED_COUNT"),
    +125            exp.Create: create_with_partitions_sql,
    +126            exp.DateAdd: _date_add_sql("ADD"),
    +127            exp.DateStrToDate: datestrtodate_sql,
    +128            exp.DateSub: _date_add_sql("SUB"),
    +129            exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.DATEINT_FORMAT}) AS INT)",
    +130            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS VARCHAR), {Drill.DATEINT_FORMAT})",
    +131            exp.If: lambda self, e: f"`IF`({self.format_args(e.this, e.args.get('true'), e.args.get('false'))})",
    +132            exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}",
    +133            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
    +134            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    +135            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
    +136            exp.StrPosition: str_position_sql,
    +137            exp.StrToDate: _str_to_date,
    +138            exp.Pow: rename_func("POW"),
    +139            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +140            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    +141            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
    +142            exp.TimeStrToTime: timestrtotime_sql,
    +143            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    +144            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    +145            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
    +146            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +147            exp.TryCast: no_trycast_sql,
    +148            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.var('DAY')))})",
    +149            exp.TsOrDsToDate: ts_or_ds_to_date_sql("drill"),
    +150            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
    +151        }
    +152
    +153        def normalize_func(self, name: str) -> str:
    +154            return name if exp.SAFE_IDENTIFIER_RE.match(name) else f"`{name}`"
     
-Generator interprets the given syntax tree and produces a SQL string as an output.
+Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
-  • time_mapping (dict): the dictionary of custom time mappings in which the key
-      represents a python time format and the output the target time format
-  • time_trie (trie): a trie of the time_mapping keys
-  • pretty (bool): if set to True the returned string will be formatted. Default: False.
-  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
-  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
-  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
-  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
-  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
-  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
-  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
-  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
-  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
-  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
-  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
-  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
-  • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
-  • normalize (bool): if set to True all identifiers will lower cased
-  • string_escape (str): specifies a string escape character. Default: '.
-  • identifier_escape (str): specifies an identifier escape character. Default: ".
-  • pad (int): determines padding in a formatted string. Default: 2.
-  • indent (int): determines the size of indentation in a formatted string. Default: 4.
-  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
-  • normalize_functions (str): normalize function names, "upper", "lower", or None
-      Default: "upper"
-  • alias_post_tablesample (bool): if the table alias comes after tablesample
-      Default: False
-  • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit
-      Default: False
-  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters
-      unsupported expressions. Default ErrorLevel.WARN.
-  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.
-      Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
-      Default: "nulls_are_small"
-  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
+  • pretty: Whether or not to format the produced SQL string.
+      Default: False.
+  • identify: Determines when an identifier should be quoted. Possible values are:
+      False (default): Never quote, except in cases where it's mandatory by the dialect.
+      True or 'always': Always quote.
+      'safe': Only quote identifiers that are case insensitive.
+  • normalize: Whether or not to normalize identifiers to lowercase.
+      Default: False.
+  • pad: Determines the pad size in a formatted string.
+      Default: 2.
+  • indent: Determines the indentation size in a formatted string.
+      Default: 2.
+  • normalize_functions: Whether or not to normalize all function names. Possible values are:
+      "upper" or True (default): Convert names to uppercase.
+      "lower": Convert names to lowercase.
+      False: Disables function name normalization.
+  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.
+      Default ErrorLevel.WARN.
+  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.
       This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
-  • leading_comma (bool): if the the comma is leading or trailing in select statements
+  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.
+      This is only relevant when generating in pretty mode. Default: False
   • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.
       The default is on the smaller end because the length only represents a segment and not the true
@@ -632,8 +602,8 @@ Default: True
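A hedged sketch passing some of these generator options through sqlglot.transpile (illustrative values only):

    import sqlglot

    print(
        sqlglot.transpile(
            "select sum(a) from t where x > 1",
            write="duckdb",
            pretty=True,                  # format the output
            identify=True,                # always quote identifiers
            normalize_functions="upper",  # uppercase function names
        )[0]
    )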
    -
    163        def normalize_func(self, name: str) -> str:
    -164            return name if exp.SAFE_IDENTIFIER_RE.match(name) else f"`{name}`"
    +            
    153        def normalize_func(self, name: str) -> str:
    +154            return name if exp.SAFE_IDENTIFIER_RE.match(name) else f"`{name}`"
     
    @@ -668,6 +638,7 @@ Default: True
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +
    createable_sql
    create_sql
    clone_sql
    describe_sql
    @@ -750,10 +721,12 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_having_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -778,7 +751,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    +
    safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -829,6 +802,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -877,6 +851,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
    diff --git a/docs/sqlglot/dialects/duckdb.html b/docs/sqlglot/dialects/duckdb.html index 0d39ff9..62dfecc 100644 --- a/docs/sqlglot/dialects/duckdb.html +++ b/docs/sqlglot/dialects/duckdb.html @@ -48,6 +48,9 @@
  • DuckDB.Generator
      +
    • + interval_sql +
    • tablesample_sql
    • @@ -138,201 +141,212 @@ 56 57 58def _parse_date_diff(args: t.List) -> exp.Expression: - 59 return exp.DateDiff( - 60 this=seq_get(args, 2), - 61 expression=seq_get(args, 1), - 62 unit=seq_get(args, 0), - 63 ) - 64 - 65 - 66def _struct_sql(self: generator.Generator, expression: exp.Struct) -> str: - 67 args = [ - 68 f"'{e.name or e.this.name}': {self.sql(e, 'expression')}" for e in expression.expressions - 69 ] - 70 return f"{{{', '.join(args)}}}" - 71 - 72 - 73def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str: - 74 if expression.is_type("array"): - 75 return f"{self.expressions(expression, flat=True)}[]" - 76 return self.datatype_sql(expression) - 77 - 78 - 79def _regexp_extract_sql(self: generator.Generator, expression: exp.RegexpExtract) -> str: - 80 bad_args = list(filter(expression.args.get, ("position", "occurrence"))) - 81 if bad_args: - 82 self.unsupported(f"REGEXP_EXTRACT does not support arg(s) {bad_args}") - 83 - 84 return self.func( - 85 "REGEXP_EXTRACT", - 86 expression.args.get("this"), - 87 expression.args.get("expression"), - 88 expression.args.get("group"), - 89 ) + 59 return exp.DateDiff(this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)) + 60 + 61 + 62def _struct_sql(self: generator.Generator, expression: exp.Struct) -> str: + 63 args = [ + 64 f"'{e.name or e.this.name}': {self.sql(e, 'expression')}" for e in expression.expressions + 65 ] + 66 return f"{{{', '.join(args)}}}" + 67 + 68 + 69def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str: + 70 if expression.is_type("array"): + 71 return f"{self.expressions(expression, flat=True)}[]" + 72 return self.datatype_sql(expression) + 73 + 74 + 75def _regexp_extract_sql(self: generator.Generator, expression: exp.RegexpExtract) -> str: + 76 bad_args = list(filter(expression.args.get, ("position", "occurrence"))) + 77 if bad_args: + 78 self.unsupported(f"REGEXP_EXTRACT does not support arg(s) {bad_args}") + 79 + 80 return self.func( + 81 "REGEXP_EXTRACT", + 82 expression.args.get("this"), + 83 expression.args.get("expression"), + 84 expression.args.get("group"), + 85 ) + 86 + 87 + 88class DuckDB(Dialect): + 89 NULL_ORDERING = "nulls_are_last" 90 - 91 - 92class DuckDB(Dialect): - 93 null_ordering = "nulls_are_last" - 94 - 95 class Tokenizer(tokens.Tokenizer): - 96 KEYWORDS = { - 97 **tokens.Tokenizer.KEYWORDS, - 98 "~": TokenType.RLIKE, - 99 ":=": TokenType.EQ, -100 "//": TokenType.DIV, -101 "ATTACH": TokenType.COMMAND, -102 "BINARY": TokenType.VARBINARY, -103 "BPCHAR": TokenType.TEXT, -104 "BITSTRING": TokenType.BIT, -105 "CHAR": TokenType.TEXT, -106 "CHARACTER VARYING": TokenType.TEXT, -107 "EXCLUDE": TokenType.EXCEPT, -108 "INT1": TokenType.TINYINT, -109 "LOGICAL": TokenType.BOOLEAN, -110 "NUMERIC": TokenType.DOUBLE, -111 "PIVOT_WIDER": TokenType.PIVOT, -112 "SIGNED": TokenType.INT, -113 "STRING": TokenType.VARCHAR, -114 "UBIGINT": TokenType.UBIGINT, -115 "UINTEGER": TokenType.UINT, -116 "USMALLINT": TokenType.USMALLINT, -117 "UTINYINT": TokenType.UTINYINT, -118 } -119 -120 class Parser(parser.Parser): -121 FUNCTIONS = { -122 **parser.Parser.FUNCTIONS, -123 "ARRAY_LENGTH": exp.ArraySize.from_arg_list, -124 "ARRAY_SORT": exp.SortArray.from_arg_list, -125 "ARRAY_REVERSE_SORT": _sort_array_reverse, -126 "DATEDIFF": _parse_date_diff, -127 "DATE_DIFF": _parse_date_diff, -128 "EPOCH": exp.TimeToUnix.from_arg_list, -129 "EPOCH_MS": lambda args: exp.UnixToTime( -130 this=exp.Div( -131 this=seq_get(args, 0), -132 expression=exp.Literal.number(1000), -133 ) 
-134 ), -135 "LIST_REVERSE_SORT": _sort_array_reverse, -136 "LIST_SORT": exp.SortArray.from_arg_list, -137 "LIST_VALUE": exp.Array.from_arg_list, -138 "REGEXP_MATCHES": exp.RegexpLike.from_arg_list, -139 "STRFTIME": format_time_lambda(exp.TimeToStr, "duckdb"), -140 "STRING_SPLIT": exp.Split.from_arg_list, -141 "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list, -142 "STRING_TO_ARRAY": exp.Split.from_arg_list, -143 "STRPTIME": format_time_lambda(exp.StrToTime, "duckdb"), -144 "STRUCT_PACK": exp.Struct.from_arg_list, -145 "STR_SPLIT": exp.Split.from_arg_list, -146 "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list, -147 "TO_TIMESTAMP": exp.UnixToTime.from_arg_list, -148 "UNNEST": exp.Explode.from_arg_list, -149 } -150 -151 TYPE_TOKENS = { -152 *parser.Parser.TYPE_TOKENS, -153 TokenType.UBIGINT, -154 TokenType.UINT, -155 TokenType.USMALLINT, -156 TokenType.UTINYINT, -157 } + 91 class Tokenizer(tokens.Tokenizer): + 92 KEYWORDS = { + 93 **tokens.Tokenizer.KEYWORDS, + 94 "~": TokenType.RLIKE, + 95 ":=": TokenType.EQ, + 96 "//": TokenType.DIV, + 97 "ATTACH": TokenType.COMMAND, + 98 "BINARY": TokenType.VARBINARY, + 99 "BPCHAR": TokenType.TEXT, +100 "BITSTRING": TokenType.BIT, +101 "CHAR": TokenType.TEXT, +102 "CHARACTER VARYING": TokenType.TEXT, +103 "EXCLUDE": TokenType.EXCEPT, +104 "INT1": TokenType.TINYINT, +105 "LOGICAL": TokenType.BOOLEAN, +106 "NUMERIC": TokenType.DOUBLE, +107 "PIVOT_WIDER": TokenType.PIVOT, +108 "SIGNED": TokenType.INT, +109 "STRING": TokenType.VARCHAR, +110 "UBIGINT": TokenType.UBIGINT, +111 "UINTEGER": TokenType.UINT, +112 "USMALLINT": TokenType.USMALLINT, +113 "UTINYINT": TokenType.UTINYINT, +114 } +115 +116 class Parser(parser.Parser): +117 CONCAT_NULL_OUTPUTS_STRING = True +118 +119 FUNCTIONS = { +120 **parser.Parser.FUNCTIONS, +121 "ARRAY_LENGTH": exp.ArraySize.from_arg_list, +122 "ARRAY_SORT": exp.SortArray.from_arg_list, +123 "ARRAY_REVERSE_SORT": _sort_array_reverse, +124 "DATEDIFF": _parse_date_diff, +125 "DATE_DIFF": _parse_date_diff, +126 "EPOCH": exp.TimeToUnix.from_arg_list, +127 "EPOCH_MS": lambda args: exp.UnixToTime( +128 this=exp.Div(this=seq_get(args, 0), expression=exp.Literal.number(1000)) +129 ), +130 "LIST_REVERSE_SORT": _sort_array_reverse, +131 "LIST_SORT": exp.SortArray.from_arg_list, +132 "LIST_VALUE": exp.Array.from_arg_list, +133 "REGEXP_MATCHES": exp.RegexpLike.from_arg_list, +134 "STRFTIME": format_time_lambda(exp.TimeToStr, "duckdb"), +135 "STRING_SPLIT": exp.Split.from_arg_list, +136 "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list, +137 "STRING_TO_ARRAY": exp.Split.from_arg_list, +138 "STRPTIME": format_time_lambda(exp.StrToTime, "duckdb"), +139 "STRUCT_PACK": exp.Struct.from_arg_list, +140 "STR_SPLIT": exp.Split.from_arg_list, +141 "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list, +142 "TO_TIMESTAMP": exp.UnixToTime.from_arg_list, +143 "UNNEST": exp.Explode.from_arg_list, +144 } +145 +146 TYPE_TOKENS = { +147 *parser.Parser.TYPE_TOKENS, +148 TokenType.UBIGINT, +149 TokenType.UINT, +150 TokenType.USMALLINT, +151 TokenType.UTINYINT, +152 } +153 +154 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: +155 if len(aggregations) == 1: +156 return super()._pivot_column_names(aggregations) +157 return pivot_column_names(aggregations, dialect="duckdb") 158 -159 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: -160 if len(aggregations) == 1: -161 return super()._pivot_column_names(aggregations) -162 return pivot_column_names(aggregations, dialect="duckdb") -163 -164 class 
Generator(generator.Generator): -165 JOIN_HINTS = False -166 TABLE_HINTS = False -167 LIMIT_FETCH = "LIMIT" -168 STRUCT_DELIMITER = ("(", ")") -169 RENAME_TABLE_WITH_DB = False -170 -171 TRANSFORMS = { -172 **generator.Generator.TRANSFORMS, -173 exp.ApproxDistinct: approx_count_distinct_sql, -174 exp.Array: lambda self, e: self.func("ARRAY", e.expressions[0]) -175 if isinstance(seq_get(e.expressions, 0), exp.Select) -176 else rename_func("LIST_VALUE")(self, e), -177 exp.ArraySize: rename_func("ARRAY_LENGTH"), -178 exp.ArraySort: _array_sort_sql, -179 exp.ArraySum: rename_func("LIST_SUM"), -180 exp.CommentColumnConstraint: no_comment_column_constraint_sql, -181 exp.CurrentDate: lambda self, e: "CURRENT_DATE", -182 exp.CurrentTime: lambda self, e: "CURRENT_TIME", -183 exp.CurrentTimestamp: lambda self, e: "CURRENT_TIMESTAMP", -184 exp.DayOfMonth: rename_func("DAYOFMONTH"), -185 exp.DayOfWeek: rename_func("DAYOFWEEK"), -186 exp.DayOfYear: rename_func("DAYOFYEAR"), -187 exp.DataType: _datatype_sql, -188 exp.DateAdd: _date_delta_sql, -189 exp.DateSub: _date_delta_sql, -190 exp.DateDiff: lambda self, e: self.func( -191 "DATE_DIFF", f"'{e.args.get('unit', 'day')}'", e.expression, e.this -192 ), -193 exp.DateStrToDate: datestrtodate_sql, -194 exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.dateint_format}) AS INT)", -195 exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.dateint_format}) AS DATE)", -196 exp.Explode: rename_func("UNNEST"), -197 exp.IntDiv: lambda self, e: self.binary(e, "//"), -198 exp.JSONExtract: arrow_json_extract_sql, -199 exp.JSONExtractScalar: arrow_json_extract_scalar_sql, -200 exp.JSONBExtract: arrow_json_extract_sql, -201 exp.JSONBExtractScalar: arrow_json_extract_scalar_sql, -202 exp.LogicalOr: rename_func("BOOL_OR"), -203 exp.LogicalAnd: rename_func("BOOL_AND"), -204 exp.Properties: no_properties_sql, -205 exp.RegexpExtract: _regexp_extract_sql, -206 exp.RegexpLike: rename_func("REGEXP_MATCHES"), -207 exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"), -208 exp.SafeDivide: no_safe_divide_sql, -209 exp.Split: rename_func("STR_SPLIT"), -210 exp.SortArray: _sort_array_sql, -211 exp.StrPosition: str_position_sql, -212 exp.StrToDate: lambda self, e: f"CAST({str_to_time_sql(self, e)} AS DATE)", -213 exp.StrToTime: str_to_time_sql, -214 exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))", -215 exp.Struct: _struct_sql, -216 exp.TimestampTrunc: timestamptrunc_sql, -217 exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)", -218 exp.TimeStrToTime: timestrtotime_sql, -219 exp.TimeStrToUnix: lambda self, e: f"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))", -220 exp.TimeToStr: lambda self, e: f"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})", -221 exp.TimeToUnix: rename_func("EPOCH"), -222 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)", -223 exp.TsOrDsAdd: _ts_or_ds_add_sql, -224 exp.TsOrDsToDate: ts_or_ds_to_date_sql("duckdb"), -225 exp.UnixToStr: lambda self, e: f"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})", -226 exp.UnixToTime: rename_func("TO_TIMESTAMP"), -227 exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)", -228 exp.WeekOfYear: rename_func("WEEKOFYEAR"), -229 } -230 -231 TYPE_MAPPING = { -232 **generator.Generator.TYPE_MAPPING, -233 exp.DataType.Type.BINARY: "BLOB", -234 exp.DataType.Type.CHAR: "TEXT", -235 
exp.DataType.Type.FLOAT: "REAL", -236 exp.DataType.Type.NCHAR: "TEXT", -237 exp.DataType.Type.NVARCHAR: "TEXT", -238 exp.DataType.Type.UINT: "UINTEGER", -239 exp.DataType.Type.VARBINARY: "BLOB", -240 exp.DataType.Type.VARCHAR: "TEXT", -241 } -242 -243 STAR_MAPPING = {**generator.Generator.STAR_MAPPING, "except": "EXCLUDE"} -244 -245 PROPERTIES_LOCATION = { -246 **generator.Generator.PROPERTIES_LOCATION, -247 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -248 } -249 -250 def tablesample_sql( -251 self, expression: exp.TableSample, seed_prefix: str = "SEED", sep: str = " AS " -252 ) -> str: -253 return super().tablesample_sql(expression, seed_prefix="REPEATABLE", sep=sep) +159 class Generator(generator.Generator): +160 JOIN_HINTS = False +161 TABLE_HINTS = False +162 LIMIT_FETCH = "LIMIT" +163 STRUCT_DELIMITER = ("(", ")") +164 RENAME_TABLE_WITH_DB = False +165 +166 TRANSFORMS = { +167 **generator.Generator.TRANSFORMS, +168 exp.ApproxDistinct: approx_count_distinct_sql, +169 exp.Array: lambda self, e: self.func("ARRAY", e.expressions[0]) +170 if isinstance(seq_get(e.expressions, 0), exp.Select) +171 else rename_func("LIST_VALUE")(self, e), +172 exp.ArraySize: rename_func("ARRAY_LENGTH"), +173 exp.ArraySort: _array_sort_sql, +174 exp.ArraySum: rename_func("LIST_SUM"), +175 exp.CommentColumnConstraint: no_comment_column_constraint_sql, +176 exp.CurrentDate: lambda self, e: "CURRENT_DATE", +177 exp.CurrentTime: lambda self, e: "CURRENT_TIME", +178 exp.CurrentTimestamp: lambda self, e: "CURRENT_TIMESTAMP", +179 exp.DayOfMonth: rename_func("DAYOFMONTH"), +180 exp.DayOfWeek: rename_func("DAYOFWEEK"), +181 exp.DayOfYear: rename_func("DAYOFYEAR"), +182 exp.DataType: _datatype_sql, +183 exp.DateAdd: _date_delta_sql, +184 exp.DateSub: _date_delta_sql, +185 exp.DateDiff: lambda self, e: self.func( +186 "DATE_DIFF", f"'{e.args.get('unit', 'day')}'", e.expression, e.this +187 ), +188 exp.DateStrToDate: datestrtodate_sql, +189 exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.DATEINT_FORMAT}) AS INT)", +190 exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.DATEINT_FORMAT}) AS DATE)", +191 exp.Explode: rename_func("UNNEST"), +192 exp.IntDiv: lambda self, e: self.binary(e, "//"), +193 exp.JSONExtract: arrow_json_extract_sql, +194 exp.JSONExtractScalar: arrow_json_extract_scalar_sql, +195 exp.JSONBExtract: arrow_json_extract_sql, +196 exp.JSONBExtractScalar: arrow_json_extract_scalar_sql, +197 exp.LogicalOr: rename_func("BOOL_OR"), +198 exp.LogicalAnd: rename_func("BOOL_AND"), +199 exp.Properties: no_properties_sql, +200 exp.RegexpExtract: _regexp_extract_sql, +201 exp.RegexpLike: rename_func("REGEXP_MATCHES"), +202 exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"), +203 exp.SafeDivide: no_safe_divide_sql, +204 exp.Split: rename_func("STR_SPLIT"), +205 exp.SortArray: _sort_array_sql, +206 exp.StrPosition: str_position_sql, +207 exp.StrToDate: lambda self, e: f"CAST({str_to_time_sql(self, e)} AS DATE)", +208 exp.StrToTime: str_to_time_sql, +209 exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))", +210 exp.Struct: _struct_sql, +211 exp.TimestampTrunc: timestamptrunc_sql, +212 exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)", +213 exp.TimeStrToTime: timestrtotime_sql, +214 exp.TimeStrToUnix: lambda self, e: f"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))", +215 exp.TimeToStr: lambda self, e: f"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})", +216 
exp.TimeToUnix: rename_func("EPOCH"), +217 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)", +218 exp.TsOrDsAdd: _ts_or_ds_add_sql, +219 exp.TsOrDsToDate: ts_or_ds_to_date_sql("duckdb"), +220 exp.UnixToStr: lambda self, e: f"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})", +221 exp.UnixToTime: rename_func("TO_TIMESTAMP"), +222 exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)", +223 exp.WeekOfYear: rename_func("WEEKOFYEAR"), +224 } +225 +226 TYPE_MAPPING = { +227 **generator.Generator.TYPE_MAPPING, +228 exp.DataType.Type.BINARY: "BLOB", +229 exp.DataType.Type.CHAR: "TEXT", +230 exp.DataType.Type.FLOAT: "REAL", +231 exp.DataType.Type.NCHAR: "TEXT", +232 exp.DataType.Type.NVARCHAR: "TEXT", +233 exp.DataType.Type.UINT: "UINTEGER", +234 exp.DataType.Type.VARBINARY: "BLOB", +235 exp.DataType.Type.VARCHAR: "TEXT", +236 } +237 +238 STAR_MAPPING = {**generator.Generator.STAR_MAPPING, "except": "EXCLUDE"} +239 +240 UNWRAPPED_INTERVAL_VALUES = (exp.Column, exp.Literal, exp.Paren) +241 +242 PROPERTIES_LOCATION = { +243 **generator.Generator.PROPERTIES_LOCATION, +244 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +245 } +246 +247 def interval_sql(self, expression: exp.Interval) -> str: +248 multiplier: t.Optional[int] = None +249 unit = expression.text("unit").lower() +250 +251 if unit.startswith("week"): +252 multiplier = 7 +253 if unit.startswith("quarter"): +254 multiplier = 90 +255 +256 if multiplier: +257 return f"({multiplier} * {super().interval_sql(exp.Interval(this=expression.this, unit=exp.var('day')))})" +258 +259 return super().interval_sql(expression) +260 +261 def tablesample_sql( +262 self, expression: exp.TableSample, seed_prefix: str = "SEED", sep: str = " AS " +263 ) -> str: +264 return super().tablesample_sql(expression, seed_prefix="REPEATABLE", sep=sep)
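A hedged illustration of the new DuckDB interval_sql above: week and quarter units are rewritten as day multiples (the exact quoting of the interval value is not verified here):

    import sqlglot

    print(sqlglot.transpile("SELECT INTERVAL 1 WEEK", write="duckdb")[0])
    # expected shape: SELECT (7 * INTERVAL 1 day)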
@@ -348,168 +362,183 @@
    -
     93class DuckDB(Dialect):
    - 94    null_ordering = "nulls_are_last"
    - 95
    - 96    class Tokenizer(tokens.Tokenizer):
    - 97        KEYWORDS = {
    - 98            **tokens.Tokenizer.KEYWORDS,
    - 99            "~": TokenType.RLIKE,
    -100            ":=": TokenType.EQ,
    -101            "//": TokenType.DIV,
    -102            "ATTACH": TokenType.COMMAND,
    -103            "BINARY": TokenType.VARBINARY,
    -104            "BPCHAR": TokenType.TEXT,
    -105            "BITSTRING": TokenType.BIT,
    -106            "CHAR": TokenType.TEXT,
    -107            "CHARACTER VARYING": TokenType.TEXT,
    -108            "EXCLUDE": TokenType.EXCEPT,
    -109            "INT1": TokenType.TINYINT,
    -110            "LOGICAL": TokenType.BOOLEAN,
    -111            "NUMERIC": TokenType.DOUBLE,
    -112            "PIVOT_WIDER": TokenType.PIVOT,
    -113            "SIGNED": TokenType.INT,
    -114            "STRING": TokenType.VARCHAR,
    -115            "UBIGINT": TokenType.UBIGINT,
    -116            "UINTEGER": TokenType.UINT,
    -117            "USMALLINT": TokenType.USMALLINT,
    -118            "UTINYINT": TokenType.UTINYINT,
    -119        }
    -120
    -121    class Parser(parser.Parser):
    -122        FUNCTIONS = {
    -123            **parser.Parser.FUNCTIONS,
    -124            "ARRAY_LENGTH": exp.ArraySize.from_arg_list,
    -125            "ARRAY_SORT": exp.SortArray.from_arg_list,
    -126            "ARRAY_REVERSE_SORT": _sort_array_reverse,
    -127            "DATEDIFF": _parse_date_diff,
    -128            "DATE_DIFF": _parse_date_diff,
    -129            "EPOCH": exp.TimeToUnix.from_arg_list,
    -130            "EPOCH_MS": lambda args: exp.UnixToTime(
    -131                this=exp.Div(
    -132                    this=seq_get(args, 0),
    -133                    expression=exp.Literal.number(1000),
    -134                )
    -135            ),
    -136            "LIST_REVERSE_SORT": _sort_array_reverse,
    -137            "LIST_SORT": exp.SortArray.from_arg_list,
    -138            "LIST_VALUE": exp.Array.from_arg_list,
    -139            "REGEXP_MATCHES": exp.RegexpLike.from_arg_list,
    -140            "STRFTIME": format_time_lambda(exp.TimeToStr, "duckdb"),
    -141            "STRING_SPLIT": exp.Split.from_arg_list,
    -142            "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
    -143            "STRING_TO_ARRAY": exp.Split.from_arg_list,
    -144            "STRPTIME": format_time_lambda(exp.StrToTime, "duckdb"),
    -145            "STRUCT_PACK": exp.Struct.from_arg_list,
    -146            "STR_SPLIT": exp.Split.from_arg_list,
    -147            "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
    -148            "TO_TIMESTAMP": exp.UnixToTime.from_arg_list,
    -149            "UNNEST": exp.Explode.from_arg_list,
    -150        }
    -151
    -152        TYPE_TOKENS = {
    -153            *parser.Parser.TYPE_TOKENS,
    -154            TokenType.UBIGINT,
    -155            TokenType.UINT,
    -156            TokenType.USMALLINT,
    -157            TokenType.UTINYINT,
    -158        }
    +            
     89class DuckDB(Dialect):
    + 90    NULL_ORDERING = "nulls_are_last"
    + 91
    + 92    class Tokenizer(tokens.Tokenizer):
    + 93        KEYWORDS = {
    + 94            **tokens.Tokenizer.KEYWORDS,
    + 95            "~": TokenType.RLIKE,
    + 96            ":=": TokenType.EQ,
    + 97            "//": TokenType.DIV,
    + 98            "ATTACH": TokenType.COMMAND,
    + 99            "BINARY": TokenType.VARBINARY,
    +100            "BPCHAR": TokenType.TEXT,
    +101            "BITSTRING": TokenType.BIT,
    +102            "CHAR": TokenType.TEXT,
    +103            "CHARACTER VARYING": TokenType.TEXT,
    +104            "EXCLUDE": TokenType.EXCEPT,
    +105            "INT1": TokenType.TINYINT,
    +106            "LOGICAL": TokenType.BOOLEAN,
    +107            "NUMERIC": TokenType.DOUBLE,
    +108            "PIVOT_WIDER": TokenType.PIVOT,
    +109            "SIGNED": TokenType.INT,
    +110            "STRING": TokenType.VARCHAR,
    +111            "UBIGINT": TokenType.UBIGINT,
    +112            "UINTEGER": TokenType.UINT,
    +113            "USMALLINT": TokenType.USMALLINT,
    +114            "UTINYINT": TokenType.UTINYINT,
    +115        }
    +116
    +117    class Parser(parser.Parser):
    +118        CONCAT_NULL_OUTPUTS_STRING = True
    +119
    +120        FUNCTIONS = {
    +121            **parser.Parser.FUNCTIONS,
    +122            "ARRAY_LENGTH": exp.ArraySize.from_arg_list,
    +123            "ARRAY_SORT": exp.SortArray.from_arg_list,
    +124            "ARRAY_REVERSE_SORT": _sort_array_reverse,
    +125            "DATEDIFF": _parse_date_diff,
    +126            "DATE_DIFF": _parse_date_diff,
    +127            "EPOCH": exp.TimeToUnix.from_arg_list,
    +128            "EPOCH_MS": lambda args: exp.UnixToTime(
    +129                this=exp.Div(this=seq_get(args, 0), expression=exp.Literal.number(1000))
    +130            ),
    +131            "LIST_REVERSE_SORT": _sort_array_reverse,
    +132            "LIST_SORT": exp.SortArray.from_arg_list,
    +133            "LIST_VALUE": exp.Array.from_arg_list,
    +134            "REGEXP_MATCHES": exp.RegexpLike.from_arg_list,
    +135            "STRFTIME": format_time_lambda(exp.TimeToStr, "duckdb"),
    +136            "STRING_SPLIT": exp.Split.from_arg_list,
    +137            "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
    +138            "STRING_TO_ARRAY": exp.Split.from_arg_list,
    +139            "STRPTIME": format_time_lambda(exp.StrToTime, "duckdb"),
    +140            "STRUCT_PACK": exp.Struct.from_arg_list,
    +141            "STR_SPLIT": exp.Split.from_arg_list,
    +142            "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
    +143            "TO_TIMESTAMP": exp.UnixToTime.from_arg_list,
    +144            "UNNEST": exp.Explode.from_arg_list,
    +145        }
    +146
    +147        TYPE_TOKENS = {
    +148            *parser.Parser.TYPE_TOKENS,
    +149            TokenType.UBIGINT,
    +150            TokenType.UINT,
    +151            TokenType.USMALLINT,
    +152            TokenType.UTINYINT,
    +153        }
    +154
    +155        def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
    +156            if len(aggregations) == 1:
    +157                return super()._pivot_column_names(aggregations)
    +158            return pivot_column_names(aggregations, dialect="duckdb")
     159
    -160        def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
    -161            if len(aggregations) == 1:
    -162                return super()._pivot_column_names(aggregations)
    -163            return pivot_column_names(aggregations, dialect="duckdb")
    -164
    -165    class Generator(generator.Generator):
    -166        JOIN_HINTS = False
    -167        TABLE_HINTS = False
    -168        LIMIT_FETCH = "LIMIT"
    -169        STRUCT_DELIMITER = ("(", ")")
    -170        RENAME_TABLE_WITH_DB = False
    -171
    -172        TRANSFORMS = {
    -173            **generator.Generator.TRANSFORMS,
    -174            exp.ApproxDistinct: approx_count_distinct_sql,
    -175            exp.Array: lambda self, e: self.func("ARRAY", e.expressions[0])
    -176            if isinstance(seq_get(e.expressions, 0), exp.Select)
    -177            else rename_func("LIST_VALUE")(self, e),
    -178            exp.ArraySize: rename_func("ARRAY_LENGTH"),
    -179            exp.ArraySort: _array_sort_sql,
    -180            exp.ArraySum: rename_func("LIST_SUM"),
    -181            exp.CommentColumnConstraint: no_comment_column_constraint_sql,
    -182            exp.CurrentDate: lambda self, e: "CURRENT_DATE",
    -183            exp.CurrentTime: lambda self, e: "CURRENT_TIME",
    -184            exp.CurrentTimestamp: lambda self, e: "CURRENT_TIMESTAMP",
    -185            exp.DayOfMonth: rename_func("DAYOFMONTH"),
    -186            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    -187            exp.DayOfYear: rename_func("DAYOFYEAR"),
    -188            exp.DataType: _datatype_sql,
    -189            exp.DateAdd: _date_delta_sql,
    -190            exp.DateSub: _date_delta_sql,
    -191            exp.DateDiff: lambda self, e: self.func(
    -192                "DATE_DIFF", f"'{e.args.get('unit', 'day')}'", e.expression, e.this
    -193            ),
    -194            exp.DateStrToDate: datestrtodate_sql,
    -195            exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.dateint_format}) AS INT)",
    -196            exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.dateint_format}) AS DATE)",
    -197            exp.Explode: rename_func("UNNEST"),
    -198            exp.IntDiv: lambda self, e: self.binary(e, "//"),
    -199            exp.JSONExtract: arrow_json_extract_sql,
    -200            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    -201            exp.JSONBExtract: arrow_json_extract_sql,
    -202            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
    -203            exp.LogicalOr: rename_func("BOOL_OR"),
    -204            exp.LogicalAnd: rename_func("BOOL_AND"),
    -205            exp.Properties: no_properties_sql,
    -206            exp.RegexpExtract: _regexp_extract_sql,
    -207            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
    -208            exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
    -209            exp.SafeDivide: no_safe_divide_sql,
    -210            exp.Split: rename_func("STR_SPLIT"),
    -211            exp.SortArray: _sort_array_sql,
    -212            exp.StrPosition: str_position_sql,
    -213            exp.StrToDate: lambda self, e: f"CAST({str_to_time_sql(self, e)} AS DATE)",
    -214            exp.StrToTime: str_to_time_sql,
    -215            exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))",
    -216            exp.Struct: _struct_sql,
    -217            exp.TimestampTrunc: timestamptrunc_sql,
    -218            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
    -219            exp.TimeStrToTime: timestrtotime_sql,
    -220            exp.TimeStrToUnix: lambda self, e: f"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))",
    -221            exp.TimeToStr: lambda self, e: f"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})",
    -222            exp.TimeToUnix: rename_func("EPOCH"),
    -223            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)",
    -224            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    -225            exp.TsOrDsToDate: ts_or_ds_to_date_sql("duckdb"),
    -226            exp.UnixToStr: lambda self, e: f"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})",
    -227            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
    -228            exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)",
    -229            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
    -230        }
    -231
    -232        TYPE_MAPPING = {
    -233            **generator.Generator.TYPE_MAPPING,
    -234            exp.DataType.Type.BINARY: "BLOB",
    -235            exp.DataType.Type.CHAR: "TEXT",
    -236            exp.DataType.Type.FLOAT: "REAL",
    -237            exp.DataType.Type.NCHAR: "TEXT",
    -238            exp.DataType.Type.NVARCHAR: "TEXT",
    -239            exp.DataType.Type.UINT: "UINTEGER",
    -240            exp.DataType.Type.VARBINARY: "BLOB",
    -241            exp.DataType.Type.VARCHAR: "TEXT",
    -242        }
    -243
    -244        STAR_MAPPING = {**generator.Generator.STAR_MAPPING, "except": "EXCLUDE"}
    -245
    -246        PROPERTIES_LOCATION = {
    -247            **generator.Generator.PROPERTIES_LOCATION,
    -248            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -249        }
    -250
    -251        def tablesample_sql(
    -252            self, expression: exp.TableSample, seed_prefix: str = "SEED", sep: str = " AS "
    -253        ) -> str:
    -254            return super().tablesample_sql(expression, seed_prefix="REPEATABLE", sep=sep)
    +160    class Generator(generator.Generator):
    +161        JOIN_HINTS = False
    +162        TABLE_HINTS = False
    +163        LIMIT_FETCH = "LIMIT"
    +164        STRUCT_DELIMITER = ("(", ")")
    +165        RENAME_TABLE_WITH_DB = False
    +166
    +167        TRANSFORMS = {
    +168            **generator.Generator.TRANSFORMS,
    +169            exp.ApproxDistinct: approx_count_distinct_sql,
    +170            exp.Array: lambda self, e: self.func("ARRAY", e.expressions[0])
    +171            if isinstance(seq_get(e.expressions, 0), exp.Select)
    +172            else rename_func("LIST_VALUE")(self, e),
    +173            exp.ArraySize: rename_func("ARRAY_LENGTH"),
    +174            exp.ArraySort: _array_sort_sql,
    +175            exp.ArraySum: rename_func("LIST_SUM"),
    +176            exp.CommentColumnConstraint: no_comment_column_constraint_sql,
    +177            exp.CurrentDate: lambda self, e: "CURRENT_DATE",
    +178            exp.CurrentTime: lambda self, e: "CURRENT_TIME",
    +179            exp.CurrentTimestamp: lambda self, e: "CURRENT_TIMESTAMP",
    +180            exp.DayOfMonth: rename_func("DAYOFMONTH"),
    +181            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    +182            exp.DayOfYear: rename_func("DAYOFYEAR"),
    +183            exp.DataType: _datatype_sql,
    +184            exp.DateAdd: _date_delta_sql,
    +185            exp.DateSub: _date_delta_sql,
    +186            exp.DateDiff: lambda self, e: self.func(
    +187                "DATE_DIFF", f"'{e.args.get('unit', 'day')}'", e.expression, e.this
    +188            ),
    +189            exp.DateStrToDate: datestrtodate_sql,
    +190            exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.DATEINT_FORMAT}) AS INT)",
    +191            exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.DATEINT_FORMAT}) AS DATE)",
    +192            exp.Explode: rename_func("UNNEST"),
    +193            exp.IntDiv: lambda self, e: self.binary(e, "//"),
    +194            exp.JSONExtract: arrow_json_extract_sql,
    +195            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    +196            exp.JSONBExtract: arrow_json_extract_sql,
    +197            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
    +198            exp.LogicalOr: rename_func("BOOL_OR"),
    +199            exp.LogicalAnd: rename_func("BOOL_AND"),
    +200            exp.Properties: no_properties_sql,
    +201            exp.RegexpExtract: _regexp_extract_sql,
    +202            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
    +203            exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
    +204            exp.SafeDivide: no_safe_divide_sql,
    +205            exp.Split: rename_func("STR_SPLIT"),
    +206            exp.SortArray: _sort_array_sql,
    +207            exp.StrPosition: str_position_sql,
    +208            exp.StrToDate: lambda self, e: f"CAST({str_to_time_sql(self, e)} AS DATE)",
    +209            exp.StrToTime: str_to_time_sql,
    +210            exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))",
    +211            exp.Struct: _struct_sql,
    +212            exp.TimestampTrunc: timestamptrunc_sql,
    +213            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
    +214            exp.TimeStrToTime: timestrtotime_sql,
    +215            exp.TimeStrToUnix: lambda self, e: f"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))",
    +216            exp.TimeToStr: lambda self, e: f"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})",
    +217            exp.TimeToUnix: rename_func("EPOCH"),
    +218            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)",
    +219            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    +220            exp.TsOrDsToDate: ts_or_ds_to_date_sql("duckdb"),
    +221            exp.UnixToStr: lambda self, e: f"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})",
    +222            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
    +223            exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)",
    +224            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
    +225        }
    +226
    +227        TYPE_MAPPING = {
    +228            **generator.Generator.TYPE_MAPPING,
    +229            exp.DataType.Type.BINARY: "BLOB",
    +230            exp.DataType.Type.CHAR: "TEXT",
    +231            exp.DataType.Type.FLOAT: "REAL",
    +232            exp.DataType.Type.NCHAR: "TEXT",
    +233            exp.DataType.Type.NVARCHAR: "TEXT",
    +234            exp.DataType.Type.UINT: "UINTEGER",
    +235            exp.DataType.Type.VARBINARY: "BLOB",
    +236            exp.DataType.Type.VARCHAR: "TEXT",
    +237        }
    +238
    +239        STAR_MAPPING = {**generator.Generator.STAR_MAPPING, "except": "EXCLUDE"}
    +240
    +241        UNWRAPPED_INTERVAL_VALUES = (exp.Column, exp.Literal, exp.Paren)
    +242
    +243        PROPERTIES_LOCATION = {
    +244            **generator.Generator.PROPERTIES_LOCATION,
    +245            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +246        }
    +247
    +248        def interval_sql(self, expression: exp.Interval) -> str:
    +249            multiplier: t.Optional[int] = None
    +250            unit = expression.text("unit").lower()
    +251
    +252            if unit.startswith("week"):
    +253                multiplier = 7
    +254            if unit.startswith("quarter"):
    +255                multiplier = 90
    +256
    +257            if multiplier:
    +258                return f"({multiplier} * {super().interval_sql(exp.Interval(this=expression.this, unit=exp.var('day')))})"
    +259
    +260            return super().interval_sql(expression)
    +261
    +262        def tablesample_sql(
    +263            self, expression: exp.TableSample, seed_prefix: str = "SEED", sep: str = " AS "
    +264        ) -> str:
    +265            return super().tablesample_sql(expression, seed_prefix="REPEATABLE", sep=sep)
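The interval_sql override added above compensates for interval units DuckDB does not accept directly by rewriting WEEK as 7 times and QUARTER as 90 times the same interval expressed in days. A minimal sketch of the expected effect (assumed output shape, not verified against this exact version):

import sqlglot

# Per the multiplier logic above, a WEEK interval should be emitted as
# 7 * <the same interval with a day unit>.
print(sqlglot.transpile("SELECT CURRENT_DATE + INTERVAL '2' WEEK", write="duckdb")[0])
# expected shape: SELECT CURRENT_DATE + (7 * INTERVAL '2' day)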
     
    @@ -544,30 +573,30 @@
    -
     96    class Tokenizer(tokens.Tokenizer):
    - 97        KEYWORDS = {
    - 98            **tokens.Tokenizer.KEYWORDS,
    - 99            "~": TokenType.RLIKE,
    -100            ":=": TokenType.EQ,
    -101            "//": TokenType.DIV,
    -102            "ATTACH": TokenType.COMMAND,
    -103            "BINARY": TokenType.VARBINARY,
    -104            "BPCHAR": TokenType.TEXT,
    -105            "BITSTRING": TokenType.BIT,
    -106            "CHAR": TokenType.TEXT,
    -107            "CHARACTER VARYING": TokenType.TEXT,
    -108            "EXCLUDE": TokenType.EXCEPT,
    -109            "INT1": TokenType.TINYINT,
    -110            "LOGICAL": TokenType.BOOLEAN,
    -111            "NUMERIC": TokenType.DOUBLE,
    -112            "PIVOT_WIDER": TokenType.PIVOT,
    -113            "SIGNED": TokenType.INT,
    -114            "STRING": TokenType.VARCHAR,
    -115            "UBIGINT": TokenType.UBIGINT,
    -116            "UINTEGER": TokenType.UINT,
    -117            "USMALLINT": TokenType.USMALLINT,
    -118            "UTINYINT": TokenType.UTINYINT,
    -119        }
    +            
     92    class Tokenizer(tokens.Tokenizer):
    + 93        KEYWORDS = {
    + 94            **tokens.Tokenizer.KEYWORDS,
    + 95            "~": TokenType.RLIKE,
    + 96            ":=": TokenType.EQ,
    + 97            "//": TokenType.DIV,
    + 98            "ATTACH": TokenType.COMMAND,
    + 99            "BINARY": TokenType.VARBINARY,
    +100            "BPCHAR": TokenType.TEXT,
    +101            "BITSTRING": TokenType.BIT,
    +102            "CHAR": TokenType.TEXT,
    +103            "CHARACTER VARYING": TokenType.TEXT,
    +104            "EXCLUDE": TokenType.EXCEPT,
    +105            "INT1": TokenType.TINYINT,
    +106            "LOGICAL": TokenType.BOOLEAN,
    +107            "NUMERIC": TokenType.DOUBLE,
    +108            "PIVOT_WIDER": TokenType.PIVOT,
    +109            "SIGNED": TokenType.INT,
    +110            "STRING": TokenType.VARCHAR,
    +111            "UBIGINT": TokenType.UBIGINT,
    +112            "UINTEGER": TokenType.UINT,
    +113            "USMALLINT": TokenType.USMALLINT,
    +114            "UTINYINT": TokenType.UTINYINT,
    +115        }
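Most of the tokenizer keywords above map DuckDB-specific type aliases onto sqlglot's canonical token types, which is what lets a round trip normalize them. A small sketch of the assumed behaviour (not taken from the patch):

import sqlglot

# INT1 tokenizes as TINYINT and LOGICAL as BOOLEAN, so the aliases are
# normalized when the statement is generated again.
print(sqlglot.transpile("CREATE TABLE t (a INT1, b LOGICAL)", read="duckdb", write="duckdb")[0])
# expected: CREATE TABLE t (a TINYINT, b BOOLEAN)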
     
@@ -579,6 +608,7 @@
@@ -595,73 +625,64 @@
    -
    121    class Parser(parser.Parser):
    -122        FUNCTIONS = {
    -123            **parser.Parser.FUNCTIONS,
    -124            "ARRAY_LENGTH": exp.ArraySize.from_arg_list,
    -125            "ARRAY_SORT": exp.SortArray.from_arg_list,
    -126            "ARRAY_REVERSE_SORT": _sort_array_reverse,
    -127            "DATEDIFF": _parse_date_diff,
    -128            "DATE_DIFF": _parse_date_diff,
    -129            "EPOCH": exp.TimeToUnix.from_arg_list,
    -130            "EPOCH_MS": lambda args: exp.UnixToTime(
    -131                this=exp.Div(
    -132                    this=seq_get(args, 0),
    -133                    expression=exp.Literal.number(1000),
    -134                )
    -135            ),
    -136            "LIST_REVERSE_SORT": _sort_array_reverse,
    -137            "LIST_SORT": exp.SortArray.from_arg_list,
    -138            "LIST_VALUE": exp.Array.from_arg_list,
    -139            "REGEXP_MATCHES": exp.RegexpLike.from_arg_list,
    -140            "STRFTIME": format_time_lambda(exp.TimeToStr, "duckdb"),
    -141            "STRING_SPLIT": exp.Split.from_arg_list,
    -142            "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
    -143            "STRING_TO_ARRAY": exp.Split.from_arg_list,
    -144            "STRPTIME": format_time_lambda(exp.StrToTime, "duckdb"),
    -145            "STRUCT_PACK": exp.Struct.from_arg_list,
    -146            "STR_SPLIT": exp.Split.from_arg_list,
    -147            "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
    -148            "TO_TIMESTAMP": exp.UnixToTime.from_arg_list,
    -149            "UNNEST": exp.Explode.from_arg_list,
    -150        }
    -151
    -152        TYPE_TOKENS = {
    -153            *parser.Parser.TYPE_TOKENS,
    -154            TokenType.UBIGINT,
    -155            TokenType.UINT,
    -156            TokenType.USMALLINT,
    -157            TokenType.UTINYINT,
    -158        }
    -159
    -160        def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
    -161            if len(aggregations) == 1:
    -162                return super()._pivot_column_names(aggregations)
    -163            return pivot_column_names(aggregations, dialect="duckdb")
    +            
    117    class Parser(parser.Parser):
    +118        CONCAT_NULL_OUTPUTS_STRING = True
    +119
    +120        FUNCTIONS = {
    +121            **parser.Parser.FUNCTIONS,
    +122            "ARRAY_LENGTH": exp.ArraySize.from_arg_list,
    +123            "ARRAY_SORT": exp.SortArray.from_arg_list,
    +124            "ARRAY_REVERSE_SORT": _sort_array_reverse,
    +125            "DATEDIFF": _parse_date_diff,
    +126            "DATE_DIFF": _parse_date_diff,
    +127            "EPOCH": exp.TimeToUnix.from_arg_list,
    +128            "EPOCH_MS": lambda args: exp.UnixToTime(
    +129                this=exp.Div(this=seq_get(args, 0), expression=exp.Literal.number(1000))
    +130            ),
    +131            "LIST_REVERSE_SORT": _sort_array_reverse,
    +132            "LIST_SORT": exp.SortArray.from_arg_list,
    +133            "LIST_VALUE": exp.Array.from_arg_list,
    +134            "REGEXP_MATCHES": exp.RegexpLike.from_arg_list,
    +135            "STRFTIME": format_time_lambda(exp.TimeToStr, "duckdb"),
    +136            "STRING_SPLIT": exp.Split.from_arg_list,
    +137            "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
    +138            "STRING_TO_ARRAY": exp.Split.from_arg_list,
    +139            "STRPTIME": format_time_lambda(exp.StrToTime, "duckdb"),
    +140            "STRUCT_PACK": exp.Struct.from_arg_list,
    +141            "STR_SPLIT": exp.Split.from_arg_list,
    +142            "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
    +143            "TO_TIMESTAMP": exp.UnixToTime.from_arg_list,
    +144            "UNNEST": exp.Explode.from_arg_list,
    +145        }
    +146
    +147        TYPE_TOKENS = {
    +148            *parser.Parser.TYPE_TOKENS,
    +149            TokenType.UBIGINT,
    +150            TokenType.UINT,
    +151            TokenType.USMALLINT,
    +152            TokenType.UTINYINT,
    +153        }
    +154
    +155        def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
    +156            if len(aggregations) == 1:
    +157                return super()._pivot_column_names(aggregations)
    +158            return pivot_column_names(aggregations, dialect="duckdb")
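One entry in the FUNCTIONS table above is more than a rename: EPOCH_MS is parsed into a UnixToTime whose argument is divided by 1000, so millisecond epochs are normalized to seconds in the AST. A hedged sketch of the expected round trip (illustrative only):

import sqlglot

ast = sqlglot.parse_one("SELECT EPOCH_MS(1679000000000)", read="duckdb")
# The parser wraps the argument in a division by 1000 before building UnixToTime,
# so regenerating DuckDB SQL is expected to produce TO_TIMESTAMP(... / 1000).
print(ast.sql(dialect="duckdb"))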
     
    -

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

    +

    Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  -• error_level: the desired error level. Default: ErrorLevel.IMMEDIATE
  +• error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  -• error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
  +• error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  -• index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
  -• alias_post_tablesample: If the table alias comes after tablesample. Default: False
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
  -• null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
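These constructor options are forwarded by the top-level parsing helpers, so they can be set without instantiating a Parser directly. A hedged sketch (the keyword arguments are assumed to be passed through as documented above):

import sqlglot
from sqlglot.errors import ErrorLevel

# Fail on the first problem instead of collecting errors, and keep 100 characters
# of context around the offending token in the error message.
expression = sqlglot.parse_one(
    "SELECT 1", read="duckdb", error_level=ErrorLevel.IMMEDIATE, error_message_context=100
)
print(expression.sql(dialect="duckdb"))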
    @@ -694,142 +715,143 @@ Default: "nulls_are_small"
    -
    165    class Generator(generator.Generator):
    -166        JOIN_HINTS = False
    -167        TABLE_HINTS = False
    -168        LIMIT_FETCH = "LIMIT"
    -169        STRUCT_DELIMITER = ("(", ")")
    -170        RENAME_TABLE_WITH_DB = False
    -171
    -172        TRANSFORMS = {
    -173            **generator.Generator.TRANSFORMS,
    -174            exp.ApproxDistinct: approx_count_distinct_sql,
    -175            exp.Array: lambda self, e: self.func("ARRAY", e.expressions[0])
    -176            if isinstance(seq_get(e.expressions, 0), exp.Select)
    -177            else rename_func("LIST_VALUE")(self, e),
    -178            exp.ArraySize: rename_func("ARRAY_LENGTH"),
    -179            exp.ArraySort: _array_sort_sql,
    -180            exp.ArraySum: rename_func("LIST_SUM"),
    -181            exp.CommentColumnConstraint: no_comment_column_constraint_sql,
    -182            exp.CurrentDate: lambda self, e: "CURRENT_DATE",
    -183            exp.CurrentTime: lambda self, e: "CURRENT_TIME",
    -184            exp.CurrentTimestamp: lambda self, e: "CURRENT_TIMESTAMP",
    -185            exp.DayOfMonth: rename_func("DAYOFMONTH"),
    -186            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    -187            exp.DayOfYear: rename_func("DAYOFYEAR"),
    -188            exp.DataType: _datatype_sql,
    -189            exp.DateAdd: _date_delta_sql,
    -190            exp.DateSub: _date_delta_sql,
    -191            exp.DateDiff: lambda self, e: self.func(
    -192                "DATE_DIFF", f"'{e.args.get('unit', 'day')}'", e.expression, e.this
    -193            ),
    -194            exp.DateStrToDate: datestrtodate_sql,
    -195            exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.dateint_format}) AS INT)",
    -196            exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.dateint_format}) AS DATE)",
    -197            exp.Explode: rename_func("UNNEST"),
    -198            exp.IntDiv: lambda self, e: self.binary(e, "//"),
    -199            exp.JSONExtract: arrow_json_extract_sql,
    -200            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    -201            exp.JSONBExtract: arrow_json_extract_sql,
    -202            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
    -203            exp.LogicalOr: rename_func("BOOL_OR"),
    -204            exp.LogicalAnd: rename_func("BOOL_AND"),
    -205            exp.Properties: no_properties_sql,
    -206            exp.RegexpExtract: _regexp_extract_sql,
    -207            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
    -208            exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
    -209            exp.SafeDivide: no_safe_divide_sql,
    -210            exp.Split: rename_func("STR_SPLIT"),
    -211            exp.SortArray: _sort_array_sql,
    -212            exp.StrPosition: str_position_sql,
    -213            exp.StrToDate: lambda self, e: f"CAST({str_to_time_sql(self, e)} AS DATE)",
    -214            exp.StrToTime: str_to_time_sql,
    -215            exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))",
    -216            exp.Struct: _struct_sql,
    -217            exp.TimestampTrunc: timestamptrunc_sql,
    -218            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
    -219            exp.TimeStrToTime: timestrtotime_sql,
    -220            exp.TimeStrToUnix: lambda self, e: f"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))",
    -221            exp.TimeToStr: lambda self, e: f"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})",
    -222            exp.TimeToUnix: rename_func("EPOCH"),
    -223            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)",
    -224            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    -225            exp.TsOrDsToDate: ts_or_ds_to_date_sql("duckdb"),
    -226            exp.UnixToStr: lambda self, e: f"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})",
    -227            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
    -228            exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)",
    -229            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
    -230        }
    -231
    -232        TYPE_MAPPING = {
    -233            **generator.Generator.TYPE_MAPPING,
    -234            exp.DataType.Type.BINARY: "BLOB",
    -235            exp.DataType.Type.CHAR: "TEXT",
    -236            exp.DataType.Type.FLOAT: "REAL",
    -237            exp.DataType.Type.NCHAR: "TEXT",
    -238            exp.DataType.Type.NVARCHAR: "TEXT",
    -239            exp.DataType.Type.UINT: "UINTEGER",
    -240            exp.DataType.Type.VARBINARY: "BLOB",
    -241            exp.DataType.Type.VARCHAR: "TEXT",
    -242        }
    -243
    -244        STAR_MAPPING = {**generator.Generator.STAR_MAPPING, "except": "EXCLUDE"}
    -245
    -246        PROPERTIES_LOCATION = {
    -247            **generator.Generator.PROPERTIES_LOCATION,
    -248            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -249        }
    -250
    -251        def tablesample_sql(
    -252            self, expression: exp.TableSample, seed_prefix: str = "SEED", sep: str = " AS "
    -253        ) -> str:
    -254            return super().tablesample_sql(expression, seed_prefix="REPEATABLE", sep=sep)
    +            
    160    class Generator(generator.Generator):
    +161        JOIN_HINTS = False
    +162        TABLE_HINTS = False
    +163        LIMIT_FETCH = "LIMIT"
    +164        STRUCT_DELIMITER = ("(", ")")
    +165        RENAME_TABLE_WITH_DB = False
    +166
    +167        TRANSFORMS = {
    +168            **generator.Generator.TRANSFORMS,
    +169            exp.ApproxDistinct: approx_count_distinct_sql,
    +170            exp.Array: lambda self, e: self.func("ARRAY", e.expressions[0])
    +171            if isinstance(seq_get(e.expressions, 0), exp.Select)
    +172            else rename_func("LIST_VALUE")(self, e),
    +173            exp.ArraySize: rename_func("ARRAY_LENGTH"),
    +174            exp.ArraySort: _array_sort_sql,
    +175            exp.ArraySum: rename_func("LIST_SUM"),
    +176            exp.CommentColumnConstraint: no_comment_column_constraint_sql,
    +177            exp.CurrentDate: lambda self, e: "CURRENT_DATE",
    +178            exp.CurrentTime: lambda self, e: "CURRENT_TIME",
    +179            exp.CurrentTimestamp: lambda self, e: "CURRENT_TIMESTAMP",
    +180            exp.DayOfMonth: rename_func("DAYOFMONTH"),
    +181            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    +182            exp.DayOfYear: rename_func("DAYOFYEAR"),
    +183            exp.DataType: _datatype_sql,
    +184            exp.DateAdd: _date_delta_sql,
    +185            exp.DateSub: _date_delta_sql,
    +186            exp.DateDiff: lambda self, e: self.func(
    +187                "DATE_DIFF", f"'{e.args.get('unit', 'day')}'", e.expression, e.this
    +188            ),
    +189            exp.DateStrToDate: datestrtodate_sql,
    +190            exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.DATEINT_FORMAT}) AS INT)",
    +191            exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.DATEINT_FORMAT}) AS DATE)",
    +192            exp.Explode: rename_func("UNNEST"),
    +193            exp.IntDiv: lambda self, e: self.binary(e, "//"),
    +194            exp.JSONExtract: arrow_json_extract_sql,
    +195            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    +196            exp.JSONBExtract: arrow_json_extract_sql,
    +197            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
    +198            exp.LogicalOr: rename_func("BOOL_OR"),
    +199            exp.LogicalAnd: rename_func("BOOL_AND"),
    +200            exp.Properties: no_properties_sql,
    +201            exp.RegexpExtract: _regexp_extract_sql,
    +202            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
    +203            exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
    +204            exp.SafeDivide: no_safe_divide_sql,
    +205            exp.Split: rename_func("STR_SPLIT"),
    +206            exp.SortArray: _sort_array_sql,
    +207            exp.StrPosition: str_position_sql,
    +208            exp.StrToDate: lambda self, e: f"CAST({str_to_time_sql(self, e)} AS DATE)",
    +209            exp.StrToTime: str_to_time_sql,
    +210            exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))",
    +211            exp.Struct: _struct_sql,
    +212            exp.TimestampTrunc: timestamptrunc_sql,
    +213            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
    +214            exp.TimeStrToTime: timestrtotime_sql,
    +215            exp.TimeStrToUnix: lambda self, e: f"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))",
    +216            exp.TimeToStr: lambda self, e: f"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})",
    +217            exp.TimeToUnix: rename_func("EPOCH"),
    +218            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)",
    +219            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    +220            exp.TsOrDsToDate: ts_or_ds_to_date_sql("duckdb"),
    +221            exp.UnixToStr: lambda self, e: f"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})",
    +222            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
    +223            exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)",
    +224            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
    +225        }
    +226
    +227        TYPE_MAPPING = {
    +228            **generator.Generator.TYPE_MAPPING,
    +229            exp.DataType.Type.BINARY: "BLOB",
    +230            exp.DataType.Type.CHAR: "TEXT",
    +231            exp.DataType.Type.FLOAT: "REAL",
    +232            exp.DataType.Type.NCHAR: "TEXT",
    +233            exp.DataType.Type.NVARCHAR: "TEXT",
    +234            exp.DataType.Type.UINT: "UINTEGER",
    +235            exp.DataType.Type.VARBINARY: "BLOB",
    +236            exp.DataType.Type.VARCHAR: "TEXT",
    +237        }
    +238
    +239        STAR_MAPPING = {**generator.Generator.STAR_MAPPING, "except": "EXCLUDE"}
    +240
    +241        UNWRAPPED_INTERVAL_VALUES = (exp.Column, exp.Literal, exp.Paren)
    +242
    +243        PROPERTIES_LOCATION = {
    +244            **generator.Generator.PROPERTIES_LOCATION,
    +245            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +246        }
    +247
    +248        def interval_sql(self, expression: exp.Interval) -> str:
    +249            multiplier: t.Optional[int] = None
    +250            unit = expression.text("unit").lower()
    +251
    +252            if unit.startswith("week"):
    +253                multiplier = 7
    +254            if unit.startswith("quarter"):
    +255                multiplier = 90
    +256
    +257            if multiplier:
    +258                return f"({multiplier} * {super().interval_sql(exp.Interval(this=expression.this, unit=exp.var('day')))})"
    +259
    +260            return super().interval_sql(expression)
    +261
    +262        def tablesample_sql(
    +263            self, expression: exp.TableSample, seed_prefix: str = "SEED", sep: str = " AS "
    +264        ) -> str:
    +265            return super().tablesample_sql(expression, seed_prefix="REPEATABLE", sep=sep)
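Two of the class attributes above are easy to observe end to end: STAR_MAPPING turns an EXCEPT star modifier into DuckDB's EXCLUDE, and TYPE_MAPPING rewrites character types to TEXT. A short sketch of the assumed behaviour (not part of the patch):

import sqlglot

# BigQuery's SELECT * EXCEPT (...) is expected to become EXCLUDE for DuckDB.
print(sqlglot.transpile("SELECT * EXCEPT (secret) FROM t", read="bigquery", write="duckdb")[0])
# expected: SELECT * EXCLUDE (secret) FROM t

# CHAR maps to TEXT per TYPE_MAPPING.
print(sqlglot.transpile("SELECT CAST(x AS CHAR) FROM t", write="duckdb")[0])
# expected: SELECT CAST(x AS TEXT) FROM t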
     
    -

    Generator interprets the given syntax tree and produces a SQL string as an output.

    +

    Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  -• time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
  -• time_trie (trie): a trie of the time_mapping keys
  -• pretty (bool): if set to True the returned string will be formatted. Default: False.
  -• quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  -• quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  -• identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  -• identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  -• bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  -• bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  -• hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  -• hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  -• byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  -• byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  -• raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  -• raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  -• identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
  -• normalize (bool): if set to True all identifiers will lower cased
  -• string_escape (str): specifies a string escape character. Default: '.
  -• identifier_escape (str): specifies an identifier escape character. Default: ".
  -• pad (int): determines padding in a formatted string. Default: 2.
  -• indent (int): determines the size of indentation in a formatted string. Default: 4.
  -• unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  -• normalize_functions (str): normalize function names, "upper", "lower", or None Default: "upper"
  -• alias_post_tablesample (bool): if the table alias comes after tablesample Default: False
  -• identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit Default: False
  -• unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  -• null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
  -• max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
  +• pretty: Whether or not to format the produced SQL string. Default: False.
  +• identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  +• normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  +• pad: Determines the pad size in a formatted string. Default: 2.
  +• indent: Determines the indentation size in a formatted string. Default: 2.
  +• normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  +• unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  +• max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  -• leading_comma (bool): if the the comma is leading or trailing in select statements
  +• leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
@@ -841,6 +863,36 @@ Default: True
+
+    def interval_sql(self, expression: sqlglot.expressions.Interval) -> str:
+
    248        def interval_sql(self, expression: exp.Interval) -> str:
    +249            multiplier: t.Optional[int] = None
    +250            unit = expression.text("unit").lower()
    +251
    +252            if unit.startswith("week"):
    +253                multiplier = 7
    +254            if unit.startswith("quarter"):
    +255                multiplier = 90
    +256
    +257            if multiplier:
    +258                return f"({multiplier} * {super().interval_sql(exp.Interval(this=expression.this, unit=exp.var('day')))})"
    +259
    +260            return super().interval_sql(expression)
    @@ -852,10 +904,10 @@ Default: True
    -
    251        def tablesample_sql(
    -252            self, expression: exp.TableSample, seed_prefix: str = "SEED", sep: str = " AS "
    -253        ) -> str:
    -254            return super().tablesample_sql(expression, seed_prefix="REPEATABLE", sep=sep)
    +            
    262        def tablesample_sql(
    +263            self, expression: exp.TableSample, seed_prefix: str = "SEED", sep: str = " AS "
    +264        ) -> str:
    +265            return super().tablesample_sql(expression, seed_prefix="REPEATABLE", sep=sep)
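The Generator options documented above are forwarded by the top-level helpers, so formatting and quoting behaviour can be controlled without constructing a Generator by hand. A hedged sketch (keyword arguments assumed to be passed through as documented):

import sqlglot

# pretty and identify are Generator options; transpile forwards them.
print(sqlglot.transpile("select a, b from t where a > 1", write="duckdb", pretty=True, identify=True)[0])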
     
    @@ -891,6 +943,7 @@ Default: True
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +
    createable_sql
    create_sql
    clone_sql
    describe_sql
    @@ -972,10 +1025,12 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_having_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -1000,7 +1055,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    +
    safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -1012,7 +1067,6 @@ Default: True
    openjson_sql
    in_sql
    in_unnest_op
    -
    interval_sql
    return_sql
    reference_sql
    anonymous_sql
    @@ -1051,6 +1105,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -1099,6 +1154,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
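The hive.html hunks that follow document, among other things, the _parse_types behaviour in which CHAR(length) and VARCHAR(length) casts are treated as STRING outside of schema definitions. A hedged sketch of how that is expected to look through the API (Spark shares the Hive behaviour per the docstring below; the output is assumed, not verified here):

import sqlglot

# Outside of a schema definition the length is dropped and the type becomes STRING.
print(sqlglot.transpile("SELECT CAST(1234 AS VARCHAR(2))", read="spark", write="spark")[0])
# expected: SELECT CAST(1234 AS STRING)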
    diff --git a/docs/sqlglot/dialects/hive.html b/docs/sqlglot/dialects/hive.html index cdfe847..d5f7b16 100644 --- a/docs/sqlglot/dialects/hive.html +++ b/docs/sqlglot/dialects/hive.html @@ -171,12 +171,12 @@ 80 _, multiplier = DATE_DELTA_INTERVAL.get(unit, ("", 1)) 81 multiplier_sql = f" / {multiplier}" if multiplier > 1 else "" 82 diff_sql = f"{sql_func}({self.format_args(expression.this, expression.expression)})" - 83 return f"{diff_sql}{multiplier_sql}" - 84 + 83 + 84 return f"{diff_sql}{multiplier_sql}" 85 - 86def _json_format_sql(self: generator.Generator, expression: exp.JSONFormat) -> str: - 87 this = expression.this - 88 + 86 + 87def _json_format_sql(self: generator.Generator, expression: exp.JSONFormat) -> str: + 88 this = expression.this 89 if not this.type: 90 from sqlglot.optimizer.annotate_types import annotate_types 91 @@ -204,7 +204,7 @@ 113def _str_to_date_sql(self: generator.Generator, expression: exp.StrToDate) -> str: 114 this = self.sql(expression, "this") 115 time_format = self.format_time(expression) -116 if time_format not in (Hive.time_format, Hive.date_format): +116 if time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT): 117 this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))" 118 return f"CAST({this} AS DATE)" 119 @@ -212,7 +212,7 @@ 121def _str_to_time_sql(self: generator.Generator, expression: exp.StrToTime) -> str: 122 this = self.sql(expression, "this") 123 time_format = self.format_time(expression) -124 if time_format not in (Hive.time_format, Hive.date_format): +124 if time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT): 125 this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))" 126 return f"CAST({this} AS TIMESTAMP)" 127 @@ -221,7 +221,7 @@ 130 self: generator.Generator, expression: exp.UnixToStr | exp.StrToUnix 131) -> t.Optional[str]: 132 time_format = self.format_time(expression) -133 if time_format == Hive.time_format: +133 if time_format == Hive.TIME_FORMAT: 134 return None 135 return time_format 136 @@ -235,16 +235,16 @@ 144def _to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str: 145 this = self.sql(expression, "this") 146 time_format = self.format_time(expression) -147 if time_format and time_format not in (Hive.time_format, Hive.date_format): +147 if time_format and time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT): 148 return f"TO_DATE({this}, {time_format})" 149 return f"TO_DATE({this})" 150 151 152class Hive(Dialect): -153 alias_post_tablesample = True -154 identifiers_can_start_with_digit = True +153 ALIAS_POST_TABLESAMPLE = True +154 IDENTIFIERS_CAN_START_WITH_DIGIT = True 155 -156 time_mapping = { +156 TIME_MAPPING = { 157 "y": "%Y", 158 "Y": "%Y", 159 "YYYY": "%Y", @@ -275,9 +275,9 @@ 184 "EEEE": "%A", 185 } 186 -187 date_format = "'yyyy-MM-dd'" -188 dateint_format = "'yyyyMMdd'" -189 time_format = "'yyyy-MM-dd HH:mm:ss'" +187 DATE_FORMAT = "'yyyy-MM-dd'" +188 DATEINT_FORMAT = "'yyyyMMdd'" +189 TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'" 190 191 class Tokenizer(tokens.Tokenizer): 192 QUOTES = ["'", '"'] @@ -315,219 +315,211 @@ 224 "BASE64": exp.ToBase64.from_arg_list, 225 "COLLECT_LIST": exp.ArrayAgg.from_arg_list, 226 "DATE_ADD": lambda args: exp.TsOrDsAdd( -227 this=seq_get(args, 0), -228 expression=seq_get(args, 1), -229 unit=exp.Literal.string("DAY"), -230 ), -231 "DATEDIFF": lambda args: exp.DateDiff( -232 this=exp.TsOrDsToDate(this=seq_get(args, 0)), -233 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), -234 ), -235 "DATE_SUB": lambda args: exp.TsOrDsAdd( -236 this=seq_get(args, 
0), -237 expression=exp.Mul( -238 this=seq_get(args, 1), -239 expression=exp.Literal.number(-1), -240 ), -241 unit=exp.Literal.string("DAY"), -242 ), -243 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( -244 [ -245 exp.TimeStrToTime(this=seq_get(args, 0)), -246 seq_get(args, 1), -247 ] -248 ), -249 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), -250 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), -251 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, -252 "LOCATE": locate_to_strposition, -253 "MAP": parse_var_map, -254 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), -255 "PERCENTILE": exp.Quantile.from_arg_list, -256 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, -257 "COLLECT_SET": exp.SetAgg.from_arg_list, -258 "SIZE": exp.ArraySize.from_arg_list, -259 "SPLIT": exp.RegexpSplit.from_arg_list, -260 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), -261 "TO_JSON": exp.JSONFormat.from_arg_list, -262 "UNBASE64": exp.FromBase64.from_arg_list, -263 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), -264 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), -265 } -266 -267 PROPERTY_PARSERS = { -268 **parser.Parser.PROPERTY_PARSERS, -269 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( -270 expressions=self._parse_wrapped_csv(self._parse_property) -271 ), -272 } -273 -274 QUERY_MODIFIER_PARSERS = { -275 **parser.Parser.QUERY_MODIFIER_PARSERS, -276 "distribute": lambda self: self._parse_sort(exp.Distribute, "DISTRIBUTE", "BY"), -277 "sort": lambda self: self._parse_sort(exp.Sort, "SORT", "BY"), -278 "cluster": lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"), -279 } -280 -281 def _parse_types( -282 self, check_func: bool = False, schema: bool = False -283 ) -> t.Optional[exp.Expression]: -284 """ -285 Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to -286 STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0: -287 -288 spark-sql (default)> select cast(1234 as varchar(2)); -289 23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support -290 char/varchar type and simply treats them as string type. Please use string type -291 directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString -292 to true, so that Spark treat them as string type as same as Spark 3.0 and earlier -293 -294 1234 -295 Time taken: 4.265 seconds, Fetched 1 row(s) -296 -297 This shows that Spark doesn't truncate the value into '12', which is inconsistent with -298 what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly. 
-299 -300 Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html -301 """ -302 this = super()._parse_types(check_func=check_func, schema=schema) -303 -304 if this and not schema: -305 return this.transform( -306 lambda node: node.replace(exp.DataType.build("text")) -307 if isinstance(node, exp.DataType) and node.is_type("char", "varchar") -308 else node, -309 copy=False, -310 ) -311 -312 return this -313 -314 class Generator(generator.Generator): -315 LIMIT_FETCH = "LIMIT" -316 TABLESAMPLE_WITH_METHOD = False -317 TABLESAMPLE_SIZE_IS_PERCENT = True -318 JOIN_HINTS = False -319 TABLE_HINTS = False -320 INDEX_ON = "ON TABLE" -321 -322 TYPE_MAPPING = { -323 **generator.Generator.TYPE_MAPPING, -324 exp.DataType.Type.TEXT: "STRING", -325 exp.DataType.Type.DATETIME: "TIMESTAMP", -326 exp.DataType.Type.VARBINARY: "BINARY", -327 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -328 exp.DataType.Type.BIT: "BOOLEAN", -329 } -330 -331 TRANSFORMS = { -332 **generator.Generator.TRANSFORMS, -333 exp.Group: transforms.preprocess([transforms.unalias_group]), -334 exp.Select: transforms.preprocess( -335 [ -336 transforms.eliminate_qualify, -337 transforms.eliminate_distinct_on, -338 transforms.unnest_to_explode, -339 ] -340 ), -341 exp.Property: _property_sql, -342 exp.ApproxDistinct: approx_count_distinct_sql, -343 exp.ArrayConcat: rename_func("CONCAT"), -344 exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this), -345 exp.ArraySize: rename_func("SIZE"), -346 exp.ArraySort: _array_sort_sql, -347 exp.With: no_recursive_cte_sql, -348 exp.DateAdd: _add_date_sql, -349 exp.DateDiff: _date_diff_sql, -350 exp.DateStrToDate: rename_func("TO_DATE"), -351 exp.DateSub: _add_date_sql, -352 exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)", -353 exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})", -354 exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}", -355 exp.FromBase64: rename_func("UNBASE64"), -356 exp.If: if_sql, -357 exp.ILike: no_ilike_sql, -358 exp.JSONExtract: rename_func("GET_JSON_OBJECT"), -359 exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"), -360 exp.JSONFormat: _json_format_sql, -361 exp.Left: left_to_substring_sql, -362 exp.Map: var_map_sql, -363 exp.Max: max_or_greatest, -364 exp.Min: min_or_least, -365 exp.VarMap: var_map_sql, -366 exp.Create: create_with_partitions_sql, -367 exp.Quantile: rename_func("PERCENTILE"), -368 exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"), -369 exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"), -370 exp.RegexpSplit: rename_func("SPLIT"), -371 exp.Right: right_to_substring_sql, -372 exp.SafeDivide: no_safe_divide_sql, -373 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), -374 exp.SetAgg: rename_func("COLLECT_SET"), -375 exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))", -376 exp.StrPosition: strposition_to_locate_sql, -377 exp.StrToDate: _str_to_date_sql, -378 exp.StrToTime: _str_to_time_sql, -379 exp.StrToUnix: _str_to_unix_sql, -380 exp.StructExtract: struct_extract_sql, -381 exp.TimeStrToDate: rename_func("TO_DATE"), -382 exp.TimeStrToTime: timestrtotime_sql, -383 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), -384 exp.TimeToStr: _time_to_str, -385 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), -386 exp.ToBase64: rename_func("BASE64"), -387 exp.TsOrDiToDi: 
lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)", -388 exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -389 exp.TsOrDsToDate: _to_date_sql, -390 exp.TryCast: no_trycast_sql, -391 exp.UnixToStr: lambda self, e: self.func( -392 "FROM_UNIXTIME", e.this, _time_format(self, e) -393 ), -394 exp.UnixToTime: rename_func("FROM_UNIXTIME"), -395 exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"), -396 exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}", -397 exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}", -398 exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"), -399 exp.NumberToStr: rename_func("FORMAT_NUMBER"), -400 exp.LastDateOfMonth: rename_func("LAST_DAY"), -401 exp.National: lambda self, e: self.national_sql(e, prefix=""), -402 } -403 -404 PROPERTIES_LOCATION = { -405 **generator.Generator.PROPERTIES_LOCATION, -406 exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA, -407 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, -408 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -409 } -410 -411 def arrayagg_sql(self, expression: exp.ArrayAgg) -> str: -412 return self.func( -413 "COLLECT_LIST", -414 expression.this.this if isinstance(expression.this, exp.Order) else expression.this, -415 ) -416 -417 def with_properties(self, properties: exp.Properties) -> str: -418 return self.properties( -419 properties, -420 prefix=self.seg("TBLPROPERTIES"), -421 ) -422 -423 def datatype_sql(self, expression: exp.DataType) -> str: -424 if ( -425 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) -426 and not expression.expressions -427 ): -428 expression = exp.DataType.build("text") -429 elif expression.this in exp.DataType.TEMPORAL_TYPES: -430 expression = exp.DataType.build(expression.this) -431 -432 return super().datatype_sql(expression) -433 -434 def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]: -435 return super().after_having_modifiers(expression) + [ -436 self.sql(expression, "distribute"), -437 self.sql(expression, "sort"), -438 self.sql(expression, "cluster"), -439 ] +227 this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY") +228 ), +229 "DATEDIFF": lambda args: exp.DateDiff( +230 this=exp.TsOrDsToDate(this=seq_get(args, 0)), +231 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), +232 ), +233 "DATE_SUB": lambda args: exp.TsOrDsAdd( +234 this=seq_get(args, 0), +235 expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)), +236 unit=exp.Literal.string("DAY"), +237 ), +238 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( +239 [ +240 exp.TimeStrToTime(this=seq_get(args, 0)), +241 seq_get(args, 1), +242 ] +243 ), +244 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), +245 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), +246 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, +247 "LOCATE": locate_to_strposition, +248 "MAP": parse_var_map, +249 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), +250 "PERCENTILE": exp.Quantile.from_arg_list, +251 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, +252 "COLLECT_SET": exp.SetAgg.from_arg_list, +253 "SIZE": exp.ArraySize.from_arg_list, +254 "SPLIT": exp.RegexpSplit.from_arg_list, +255 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, 
"hive"), +256 "TO_JSON": exp.JSONFormat.from_arg_list, +257 "UNBASE64": exp.FromBase64.from_arg_list, +258 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), +259 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), +260 } +261 +262 PROPERTY_PARSERS = { +263 **parser.Parser.PROPERTY_PARSERS, +264 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( +265 expressions=self._parse_wrapped_csv(self._parse_property) +266 ), +267 } +268 +269 QUERY_MODIFIER_PARSERS = { +270 **parser.Parser.QUERY_MODIFIER_PARSERS, +271 "distribute": lambda self: self._parse_sort(exp.Distribute, "DISTRIBUTE", "BY"), +272 "sort": lambda self: self._parse_sort(exp.Sort, "SORT", "BY"), +273 "cluster": lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"), +274 } +275 +276 def _parse_types( +277 self, check_func: bool = False, schema: bool = False +278 ) -> t.Optional[exp.Expression]: +279 """ +280 Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to +281 STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0: +282 +283 spark-sql (default)> select cast(1234 as varchar(2)); +284 23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support +285 char/varchar type and simply treats them as string type. Please use string type +286 directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString +287 to true, so that Spark treat them as string type as same as Spark 3.0 and earlier +288 +289 1234 +290 Time taken: 4.265 seconds, Fetched 1 row(s) +291 +292 This shows that Spark doesn't truncate the value into '12', which is inconsistent with +293 what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly. 
+294 +295 Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html +296 """ +297 this = super()._parse_types(check_func=check_func, schema=schema) +298 +299 if this and not schema: +300 return this.transform( +301 lambda node: node.replace(exp.DataType.build("text")) +302 if isinstance(node, exp.DataType) and node.is_type("char", "varchar") +303 else node, +304 copy=False, +305 ) +306 +307 return this +308 +309 class Generator(generator.Generator): +310 LIMIT_FETCH = "LIMIT" +311 TABLESAMPLE_WITH_METHOD = False +312 TABLESAMPLE_SIZE_IS_PERCENT = True +313 JOIN_HINTS = False +314 TABLE_HINTS = False +315 INDEX_ON = "ON TABLE" +316 +317 TYPE_MAPPING = { +318 **generator.Generator.TYPE_MAPPING, +319 exp.DataType.Type.TEXT: "STRING", +320 exp.DataType.Type.DATETIME: "TIMESTAMP", +321 exp.DataType.Type.VARBINARY: "BINARY", +322 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +323 exp.DataType.Type.BIT: "BOOLEAN", +324 } +325 +326 TRANSFORMS = { +327 **generator.Generator.TRANSFORMS, +328 exp.Group: transforms.preprocess([transforms.unalias_group]), +329 exp.Select: transforms.preprocess( +330 [ +331 transforms.eliminate_qualify, +332 transforms.eliminate_distinct_on, +333 transforms.unnest_to_explode, +334 ] +335 ), +336 exp.Property: _property_sql, +337 exp.ApproxDistinct: approx_count_distinct_sql, +338 exp.ArrayConcat: rename_func("CONCAT"), +339 exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this), +340 exp.ArraySize: rename_func("SIZE"), +341 exp.ArraySort: _array_sort_sql, +342 exp.With: no_recursive_cte_sql, +343 exp.DateAdd: _add_date_sql, +344 exp.DateDiff: _date_diff_sql, +345 exp.DateStrToDate: rename_func("TO_DATE"), +346 exp.DateSub: _add_date_sql, +347 exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)", +348 exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})", +349 exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}", +350 exp.FromBase64: rename_func("UNBASE64"), +351 exp.If: if_sql, +352 exp.ILike: no_ilike_sql, +353 exp.JSONExtract: rename_func("GET_JSON_OBJECT"), +354 exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"), +355 exp.JSONFormat: _json_format_sql, +356 exp.Left: left_to_substring_sql, +357 exp.Map: var_map_sql, +358 exp.Max: max_or_greatest, +359 exp.Min: min_or_least, +360 exp.VarMap: var_map_sql, +361 exp.Create: create_with_partitions_sql, +362 exp.Quantile: rename_func("PERCENTILE"), +363 exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"), +364 exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"), +365 exp.RegexpSplit: rename_func("SPLIT"), +366 exp.Right: right_to_substring_sql, +367 exp.SafeDivide: no_safe_divide_sql, +368 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), +369 exp.SetAgg: rename_func("COLLECT_SET"), +370 exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))", +371 exp.StrPosition: strposition_to_locate_sql, +372 exp.StrToDate: _str_to_date_sql, +373 exp.StrToTime: _str_to_time_sql, +374 exp.StrToUnix: _str_to_unix_sql, +375 exp.StructExtract: struct_extract_sql, +376 exp.TimeStrToDate: rename_func("TO_DATE"), +377 exp.TimeStrToTime: timestrtotime_sql, +378 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), +379 exp.TimeToStr: _time_to_str, +380 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), +381 exp.ToBase64: rename_func("BASE64"), +382 exp.TsOrDiToDi: 
lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)", +383 exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +384 exp.TsOrDsToDate: _to_date_sql, +385 exp.TryCast: no_trycast_sql, +386 exp.UnixToStr: lambda self, e: self.func( +387 "FROM_UNIXTIME", e.this, _time_format(self, e) +388 ), +389 exp.UnixToTime: rename_func("FROM_UNIXTIME"), +390 exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"), +391 exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}", +392 exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}", +393 exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"), +394 exp.NumberToStr: rename_func("FORMAT_NUMBER"), +395 exp.LastDateOfMonth: rename_func("LAST_DAY"), +396 exp.National: lambda self, e: self.national_sql(e, prefix=""), +397 } +398 +399 PROPERTIES_LOCATION = { +400 **generator.Generator.PROPERTIES_LOCATION, +401 exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA, +402 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, +403 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +404 } +405 +406 def arrayagg_sql(self, expression: exp.ArrayAgg) -> str: +407 return self.func( +408 "COLLECT_LIST", +409 expression.this.this if isinstance(expression.this, exp.Order) else expression.this, +410 ) +411 +412 def with_properties(self, properties: exp.Properties) -> str: +413 return self.properties(properties, prefix=self.seg("TBLPROPERTIES")) +414 +415 def datatype_sql(self, expression: exp.DataType) -> str: +416 if ( +417 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) +418 and not expression.expressions +419 ): +420 expression = exp.DataType.build("text") +421 elif expression.this in exp.DataType.TEMPORAL_TYPES: +422 expression = exp.DataType.build(expression.this) +423 +424 return super().datatype_sql(expression) +425 +426 def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]: +427 return super().after_having_modifiers(expression) + [ +428 self.sql(expression, "distribute"), +429 self.sql(expression, "sort"), +430 self.sql(expression, "cluster"), +431 ]
    @@ -544,10 +536,10 @@
    153class Hive(Dialect):
    -154    alias_post_tablesample = True
    -155    identifiers_can_start_with_digit = True
    +154    ALIAS_POST_TABLESAMPLE = True
    +155    IDENTIFIERS_CAN_START_WITH_DIGIT = True
     156
    -157    time_mapping = {
    +157    TIME_MAPPING = {
     158        "y": "%Y",
     159        "Y": "%Y",
     160        "YYYY": "%Y",
    @@ -578,9 +570,9 @@
     185        "EEEE": "%A",
     186    }
     187
    -188    date_format = "'yyyy-MM-dd'"
    -189    dateint_format = "'yyyyMMdd'"
    -190    time_format = "'yyyy-MM-dd HH:mm:ss'"
    +188    DATE_FORMAT = "'yyyy-MM-dd'"
    +189    DATEINT_FORMAT = "'yyyyMMdd'"
    +190    TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'"
     191
     192    class Tokenizer(tokens.Tokenizer):
     193        QUOTES = ["'", '"']
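The TIME_MAPPING constants renamed in this hunk translate Hive's Java-style format tokens into sqlglot's internal tokens, which is what makes format strings portable across dialects. A hedged sketch; the DuckDB rendering in the comment is indicative only and depends on the target dialect's own mappings:

    import sqlglot

    # 'yyyy-MM-dd HH:mm:ss' is rewritten token by token via Hive.TIME_MAPPING
    # ('yyyy' -> '%Y', 'MM' -> '%m', ...) before being rendered for DuckDB.
    sql = "SELECT DATE_FORMAT(ts, 'yyyy-MM-dd HH:mm:ss') FROM t"
    print(sqlglot.transpile(sql, read="hive", write="duckdb")[0])
    # Something like: SELECT STRFTIME(CAST(ts AS TIMESTAMP), '%Y-%m-%d %H:%M:%S') FROM t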
    @@ -618,219 +610,211 @@
     225            "BASE64": exp.ToBase64.from_arg_list,
     226            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
     227            "DATE_ADD": lambda args: exp.TsOrDsAdd(
    -228                this=seq_get(args, 0),
    -229                expression=seq_get(args, 1),
    -230                unit=exp.Literal.string("DAY"),
    -231            ),
    -232            "DATEDIFF": lambda args: exp.DateDiff(
    -233                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
    -234                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
    -235            ),
    -236            "DATE_SUB": lambda args: exp.TsOrDsAdd(
    -237                this=seq_get(args, 0),
    -238                expression=exp.Mul(
    -239                    this=seq_get(args, 1),
    -240                    expression=exp.Literal.number(-1),
    -241                ),
    -242                unit=exp.Literal.string("DAY"),
    -243            ),
    -244            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
    -245                [
    -246                    exp.TimeStrToTime(this=seq_get(args, 0)),
    -247                    seq_get(args, 1),
    -248                ]
    -249            ),
    -250            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
    -251            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
    -252            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
    -253            "LOCATE": locate_to_strposition,
    -254            "MAP": parse_var_map,
    -255            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
    -256            "PERCENTILE": exp.Quantile.from_arg_list,
    -257            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
    -258            "COLLECT_SET": exp.SetAgg.from_arg_list,
    -259            "SIZE": exp.ArraySize.from_arg_list,
    -260            "SPLIT": exp.RegexpSplit.from_arg_list,
    -261            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
    -262            "TO_JSON": exp.JSONFormat.from_arg_list,
    -263            "UNBASE64": exp.FromBase64.from_arg_list,
    -264            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
    -265            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
    -266        }
    -267
    -268        PROPERTY_PARSERS = {
    -269            **parser.Parser.PROPERTY_PARSERS,
    -270            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
    -271                expressions=self._parse_wrapped_csv(self._parse_property)
    -272            ),
    -273        }
    -274
    -275        QUERY_MODIFIER_PARSERS = {
    -276            **parser.Parser.QUERY_MODIFIER_PARSERS,
    -277            "distribute": lambda self: self._parse_sort(exp.Distribute, "DISTRIBUTE", "BY"),
    -278            "sort": lambda self: self._parse_sort(exp.Sort, "SORT", "BY"),
    -279            "cluster": lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"),
    -280        }
    -281
    -282        def _parse_types(
    -283            self, check_func: bool = False, schema: bool = False
    -284        ) -> t.Optional[exp.Expression]:
    -285            """
    -286            Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to
    -287            STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0:
    -288
    -289                spark-sql (default)> select cast(1234 as varchar(2));
    -290                23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support
    -291                char/varchar type and simply treats them as string type. Please use string type
    -292                directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString
    -293                to true, so that Spark treat them as string type as same as Spark 3.0 and earlier
    -294
    -295                1234
    -296                Time taken: 4.265 seconds, Fetched 1 row(s)
    -297
    -298            This shows that Spark doesn't truncate the value into '12', which is inconsistent with
    -299            what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly.
    -300
    -301            Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html
    -302            """
    -303            this = super()._parse_types(check_func=check_func, schema=schema)
    -304
    -305            if this and not schema:
    -306                return this.transform(
    -307                    lambda node: node.replace(exp.DataType.build("text"))
    -308                    if isinstance(node, exp.DataType) and node.is_type("char", "varchar")
    -309                    else node,
    -310                    copy=False,
    -311                )
    -312
    -313            return this
    -314
    -315    class Generator(generator.Generator):
    -316        LIMIT_FETCH = "LIMIT"
    -317        TABLESAMPLE_WITH_METHOD = False
    -318        TABLESAMPLE_SIZE_IS_PERCENT = True
    -319        JOIN_HINTS = False
    -320        TABLE_HINTS = False
    -321        INDEX_ON = "ON TABLE"
    -322
    -323        TYPE_MAPPING = {
    -324            **generator.Generator.TYPE_MAPPING,
    -325            exp.DataType.Type.TEXT: "STRING",
    -326            exp.DataType.Type.DATETIME: "TIMESTAMP",
    -327            exp.DataType.Type.VARBINARY: "BINARY",
    -328            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -329            exp.DataType.Type.BIT: "BOOLEAN",
    -330        }
    -331
    -332        TRANSFORMS = {
    -333            **generator.Generator.TRANSFORMS,
    -334            exp.Group: transforms.preprocess([transforms.unalias_group]),
    -335            exp.Select: transforms.preprocess(
    -336                [
    -337                    transforms.eliminate_qualify,
    -338                    transforms.eliminate_distinct_on,
    -339                    transforms.unnest_to_explode,
    -340                ]
    -341            ),
    -342            exp.Property: _property_sql,
    -343            exp.ApproxDistinct: approx_count_distinct_sql,
    -344            exp.ArrayConcat: rename_func("CONCAT"),
    -345            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
    -346            exp.ArraySize: rename_func("SIZE"),
    -347            exp.ArraySort: _array_sort_sql,
    -348            exp.With: no_recursive_cte_sql,
    -349            exp.DateAdd: _add_date_sql,
    -350            exp.DateDiff: _date_diff_sql,
    -351            exp.DateStrToDate: rename_func("TO_DATE"),
    -352            exp.DateSub: _add_date_sql,
    -353            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)",
    -354            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})",
    -355            exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
    -356            exp.FromBase64: rename_func("UNBASE64"),
    -357            exp.If: if_sql,
    -358            exp.ILike: no_ilike_sql,
    -359            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
    -360            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
    -361            exp.JSONFormat: _json_format_sql,
    -362            exp.Left: left_to_substring_sql,
    -363            exp.Map: var_map_sql,
    -364            exp.Max: max_or_greatest,
    -365            exp.Min: min_or_least,
    -366            exp.VarMap: var_map_sql,
    -367            exp.Create: create_with_partitions_sql,
    -368            exp.Quantile: rename_func("PERCENTILE"),
    -369            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
    -370            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
    -371            exp.RegexpSplit: rename_func("SPLIT"),
    -372            exp.Right: right_to_substring_sql,
    -373            exp.SafeDivide: no_safe_divide_sql,
    -374            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
    -375            exp.SetAgg: rename_func("COLLECT_SET"),
    -376            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
    -377            exp.StrPosition: strposition_to_locate_sql,
    -378            exp.StrToDate: _str_to_date_sql,
    -379            exp.StrToTime: _str_to_time_sql,
    -380            exp.StrToUnix: _str_to_unix_sql,
    -381            exp.StructExtract: struct_extract_sql,
    -382            exp.TimeStrToDate: rename_func("TO_DATE"),
    -383            exp.TimeStrToTime: timestrtotime_sql,
    -384            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    -385            exp.TimeToStr: _time_to_str,
    -386            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
    -387            exp.ToBase64: rename_func("BASE64"),
    -388            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
    -389            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -390            exp.TsOrDsToDate: _to_date_sql,
    -391            exp.TryCast: no_trycast_sql,
    -392            exp.UnixToStr: lambda self, e: self.func(
    -393                "FROM_UNIXTIME", e.this, _time_format(self, e)
    -394            ),
    -395            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
    -396            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
    -397            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
    -398            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
    -399            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
    -400            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
    -401            exp.LastDateOfMonth: rename_func("LAST_DAY"),
    -402            exp.National: lambda self, e: self.national_sql(e, prefix=""),
    -403        }
    -404
    -405        PROPERTIES_LOCATION = {
    -406            **generator.Generator.PROPERTIES_LOCATION,
    -407            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
    -408            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    -409            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -410        }
    -411
    -412        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    -413            return self.func(
    -414                "COLLECT_LIST",
    -415                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
    -416            )
    -417
    -418        def with_properties(self, properties: exp.Properties) -> str:
    -419            return self.properties(
    -420                properties,
    -421                prefix=self.seg("TBLPROPERTIES"),
    -422            )
    -423
    -424        def datatype_sql(self, expression: exp.DataType) -> str:
    -425            if (
    -426                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
    -427                and not expression.expressions
    -428            ):
    -429                expression = exp.DataType.build("text")
    -430            elif expression.this in exp.DataType.TEMPORAL_TYPES:
    -431                expression = exp.DataType.build(expression.this)
    -432
    -433            return super().datatype_sql(expression)
    -434
    -435        def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
    -436            return super().after_having_modifiers(expression) + [
    -437                self.sql(expression, "distribute"),
    -438                self.sql(expression, "sort"),
    -439                self.sql(expression, "cluster"),
    -440            ]
    +228                this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY")
    +229            ),
    +230            "DATEDIFF": lambda args: exp.DateDiff(
    +231                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
    +232                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
    +233            ),
    +234            "DATE_SUB": lambda args: exp.TsOrDsAdd(
    +235                this=seq_get(args, 0),
    +236                expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)),
    +237                unit=exp.Literal.string("DAY"),
    +238            ),
    +239            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
    +240                [
    +241                    exp.TimeStrToTime(this=seq_get(args, 0)),
    +242                    seq_get(args, 1),
    +243                ]
    +244            ),
    +245            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
    +246            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
    +247            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
    +248            "LOCATE": locate_to_strposition,
    +249            "MAP": parse_var_map,
    +250            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
    +251            "PERCENTILE": exp.Quantile.from_arg_list,
    +252            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
    +253            "COLLECT_SET": exp.SetAgg.from_arg_list,
    +254            "SIZE": exp.ArraySize.from_arg_list,
    +255            "SPLIT": exp.RegexpSplit.from_arg_list,
    +256            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
    +257            "TO_JSON": exp.JSONFormat.from_arg_list,
    +258            "UNBASE64": exp.FromBase64.from_arg_list,
    +259            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
    +260            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
    +261        }
    +262
    +263        PROPERTY_PARSERS = {
    +264            **parser.Parser.PROPERTY_PARSERS,
    +265            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
    +266                expressions=self._parse_wrapped_csv(self._parse_property)
    +267            ),
    +268        }
    +269
    +270        QUERY_MODIFIER_PARSERS = {
    +271            **parser.Parser.QUERY_MODIFIER_PARSERS,
    +272            "distribute": lambda self: self._parse_sort(exp.Distribute, "DISTRIBUTE", "BY"),
    +273            "sort": lambda self: self._parse_sort(exp.Sort, "SORT", "BY"),
    +274            "cluster": lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"),
    +275        }
    +276
    +277        def _parse_types(
    +278            self, check_func: bool = False, schema: bool = False
    +279        ) -> t.Optional[exp.Expression]:
    +280            """
    +281            Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to
    +282            STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0:
    +283
    +284                spark-sql (default)> select cast(1234 as varchar(2));
    +285                23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support
    +286                char/varchar type and simply treats them as string type. Please use string type
    +287                directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString
    +288                to true, so that Spark treat them as string type as same as Spark 3.0 and earlier
    +289
    +290                1234
    +291                Time taken: 4.265 seconds, Fetched 1 row(s)
    +292
    +293            This shows that Spark doesn't truncate the value into '12', which is inconsistent with
    +294            what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly.
    +295
    +296            Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html
    +297            """
    +298            this = super()._parse_types(check_func=check_func, schema=schema)
    +299
    +300            if this and not schema:
    +301                return this.transform(
    +302                    lambda node: node.replace(exp.DataType.build("text"))
    +303                    if isinstance(node, exp.DataType) and node.is_type("char", "varchar")
    +304                    else node,
    +305                    copy=False,
    +306                )
    +307
    +308            return this
    +309
    +310    class Generator(generator.Generator):
    +311        LIMIT_FETCH = "LIMIT"
    +312        TABLESAMPLE_WITH_METHOD = False
    +313        TABLESAMPLE_SIZE_IS_PERCENT = True
    +314        JOIN_HINTS = False
    +315        TABLE_HINTS = False
    +316        INDEX_ON = "ON TABLE"
    +317
    +318        TYPE_MAPPING = {
    +319            **generator.Generator.TYPE_MAPPING,
    +320            exp.DataType.Type.TEXT: "STRING",
    +321            exp.DataType.Type.DATETIME: "TIMESTAMP",
    +322            exp.DataType.Type.VARBINARY: "BINARY",
    +323            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +324            exp.DataType.Type.BIT: "BOOLEAN",
    +325        }
    +326
    +327        TRANSFORMS = {
    +328            **generator.Generator.TRANSFORMS,
    +329            exp.Group: transforms.preprocess([transforms.unalias_group]),
    +330            exp.Select: transforms.preprocess(
    +331                [
    +332                    transforms.eliminate_qualify,
    +333                    transforms.eliminate_distinct_on,
    +334                    transforms.unnest_to_explode,
    +335                ]
    +336            ),
    +337            exp.Property: _property_sql,
    +338            exp.ApproxDistinct: approx_count_distinct_sql,
    +339            exp.ArrayConcat: rename_func("CONCAT"),
    +340            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
    +341            exp.ArraySize: rename_func("SIZE"),
    +342            exp.ArraySort: _array_sort_sql,
    +343            exp.With: no_recursive_cte_sql,
    +344            exp.DateAdd: _add_date_sql,
    +345            exp.DateDiff: _date_diff_sql,
    +346            exp.DateStrToDate: rename_func("TO_DATE"),
    +347            exp.DateSub: _add_date_sql,
    +348            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)",
    +349            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})",
    +350            exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
    +351            exp.FromBase64: rename_func("UNBASE64"),
    +352            exp.If: if_sql,
    +353            exp.ILike: no_ilike_sql,
    +354            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
    +355            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
    +356            exp.JSONFormat: _json_format_sql,
    +357            exp.Left: left_to_substring_sql,
    +358            exp.Map: var_map_sql,
    +359            exp.Max: max_or_greatest,
    +360            exp.Min: min_or_least,
    +361            exp.VarMap: var_map_sql,
    +362            exp.Create: create_with_partitions_sql,
    +363            exp.Quantile: rename_func("PERCENTILE"),
    +364            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
    +365            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
    +366            exp.RegexpSplit: rename_func("SPLIT"),
    +367            exp.Right: right_to_substring_sql,
    +368            exp.SafeDivide: no_safe_divide_sql,
    +369            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
    +370            exp.SetAgg: rename_func("COLLECT_SET"),
    +371            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
    +372            exp.StrPosition: strposition_to_locate_sql,
    +373            exp.StrToDate: _str_to_date_sql,
    +374            exp.StrToTime: _str_to_time_sql,
    +375            exp.StrToUnix: _str_to_unix_sql,
    +376            exp.StructExtract: struct_extract_sql,
    +377            exp.TimeStrToDate: rename_func("TO_DATE"),
    +378            exp.TimeStrToTime: timestrtotime_sql,
    +379            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    +380            exp.TimeToStr: _time_to_str,
    +381            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
    +382            exp.ToBase64: rename_func("BASE64"),
    +383            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
    +384            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +385            exp.TsOrDsToDate: _to_date_sql,
    +386            exp.TryCast: no_trycast_sql,
    +387            exp.UnixToStr: lambda self, e: self.func(
    +388                "FROM_UNIXTIME", e.this, _time_format(self, e)
    +389            ),
    +390            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
    +391            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
    +392            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
    +393            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
    +394            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
    +395            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
    +396            exp.LastDateOfMonth: rename_func("LAST_DAY"),
    +397            exp.National: lambda self, e: self.national_sql(e, prefix=""),
    +398        }
    +399
    +400        PROPERTIES_LOCATION = {
    +401            **generator.Generator.PROPERTIES_LOCATION,
    +402            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
    +403            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    +404            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +405        }
    +406
    +407        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    +408            return self.func(
    +409                "COLLECT_LIST",
    +410                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
    +411            )
    +412
    +413        def with_properties(self, properties: exp.Properties) -> str:
    +414            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
    +415
    +416        def datatype_sql(self, expression: exp.DataType) -> str:
    +417            if (
    +418                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
    +419                and not expression.expressions
    +420            ):
    +421                expression = exp.DataType.build("text")
    +422            elif expression.this in exp.DataType.TEMPORAL_TYPES:
    +423                expression = exp.DataType.build(expression.this)
    +424
    +425            return super().datatype_sql(expression)
    +426
    +427        def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
    +428            return super().after_having_modifiers(expression) + [
    +429                self.sql(expression, "distribute"),
    +430                self.sql(expression, "sort"),
    +431                self.sql(expression, "cluster"),
    +432            ]
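The _parse_types override above is easiest to see end to end: outside of a schema, CHAR(n)/VARCHAR(n) casts are widened to TEXT at parse time, and the Generator's TYPE_MAPPING then prints TEXT as STRING, while column definitions keep their length. A small sketch, assuming this release's behaviour (outputs in the comments are expected, not guaranteed verbatim):

    import sqlglot

    # Cast context: the length is dropped and TEXT maps to STRING.
    print(sqlglot.transpile("SELECT CAST(1234 AS VARCHAR(2))", read="hive", write="hive")[0])
    # -> SELECT CAST(1234 AS STRING)

    # Schema context: schema=True is threaded through _parse_types, so the length survives.
    print(sqlglot.transpile("CREATE TABLE t (c VARCHAR(2))", read="hive", write="hive")[0])
    # -> CREATE TABLE t (c VARCHAR(2))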
     
    @@ -902,6 +886,7 @@ @@ -927,116 +912,103 @@ 225 "BASE64": exp.ToBase64.from_arg_list, 226 "COLLECT_LIST": exp.ArrayAgg.from_arg_list, 227 "DATE_ADD": lambda args: exp.TsOrDsAdd( -228 this=seq_get(args, 0), -229 expression=seq_get(args, 1), -230 unit=exp.Literal.string("DAY"), -231 ), -232 "DATEDIFF": lambda args: exp.DateDiff( -233 this=exp.TsOrDsToDate(this=seq_get(args, 0)), -234 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), -235 ), -236 "DATE_SUB": lambda args: exp.TsOrDsAdd( -237 this=seq_get(args, 0), -238 expression=exp.Mul( -239 this=seq_get(args, 1), -240 expression=exp.Literal.number(-1), -241 ), -242 unit=exp.Literal.string("DAY"), -243 ), -244 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( -245 [ -246 exp.TimeStrToTime(this=seq_get(args, 0)), -247 seq_get(args, 1), -248 ] -249 ), -250 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), -251 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), -252 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, -253 "LOCATE": locate_to_strposition, -254 "MAP": parse_var_map, -255 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), -256 "PERCENTILE": exp.Quantile.from_arg_list, -257 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, -258 "COLLECT_SET": exp.SetAgg.from_arg_list, -259 "SIZE": exp.ArraySize.from_arg_list, -260 "SPLIT": exp.RegexpSplit.from_arg_list, -261 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), -262 "TO_JSON": exp.JSONFormat.from_arg_list, -263 "UNBASE64": exp.FromBase64.from_arg_list, -264 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), -265 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), -266 } -267 -268 PROPERTY_PARSERS = { -269 **parser.Parser.PROPERTY_PARSERS, -270 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( -271 expressions=self._parse_wrapped_csv(self._parse_property) -272 ), -273 } -274 -275 QUERY_MODIFIER_PARSERS = { -276 **parser.Parser.QUERY_MODIFIER_PARSERS, -277 "distribute": lambda self: self._parse_sort(exp.Distribute, "DISTRIBUTE", "BY"), -278 "sort": lambda self: self._parse_sort(exp.Sort, "SORT", "BY"), -279 "cluster": lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"), -280 } -281 -282 def _parse_types( -283 self, check_func: bool = False, schema: bool = False -284 ) -> t.Optional[exp.Expression]: -285 """ -286 Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to -287 STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0: -288 -289 spark-sql (default)> select cast(1234 as varchar(2)); -290 23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support -291 char/varchar type and simply treats them as string type. Please use string type -292 directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString -293 to true, so that Spark treat them as string type as same as Spark 3.0 and earlier -294 -295 1234 -296 Time taken: 4.265 seconds, Fetched 1 row(s) -297 -298 This shows that Spark doesn't truncate the value into '12', which is inconsistent with -299 what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly. 
-300 -301 Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html -302 """ -303 this = super()._parse_types(check_func=check_func, schema=schema) -304 -305 if this and not schema: -306 return this.transform( -307 lambda node: node.replace(exp.DataType.build("text")) -308 if isinstance(node, exp.DataType) and node.is_type("char", "varchar") -309 else node, -310 copy=False, -311 ) -312 -313 return this +228 this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY") +229 ), +230 "DATEDIFF": lambda args: exp.DateDiff( +231 this=exp.TsOrDsToDate(this=seq_get(args, 0)), +232 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), +233 ), +234 "DATE_SUB": lambda args: exp.TsOrDsAdd( +235 this=seq_get(args, 0), +236 expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)), +237 unit=exp.Literal.string("DAY"), +238 ), +239 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( +240 [ +241 exp.TimeStrToTime(this=seq_get(args, 0)), +242 seq_get(args, 1), +243 ] +244 ), +245 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), +246 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), +247 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, +248 "LOCATE": locate_to_strposition, +249 "MAP": parse_var_map, +250 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), +251 "PERCENTILE": exp.Quantile.from_arg_list, +252 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, +253 "COLLECT_SET": exp.SetAgg.from_arg_list, +254 "SIZE": exp.ArraySize.from_arg_list, +255 "SPLIT": exp.RegexpSplit.from_arg_list, +256 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), +257 "TO_JSON": exp.JSONFormat.from_arg_list, +258 "UNBASE64": exp.FromBase64.from_arg_list, +259 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), +260 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), +261 } +262 +263 PROPERTY_PARSERS = { +264 **parser.Parser.PROPERTY_PARSERS, +265 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( +266 expressions=self._parse_wrapped_csv(self._parse_property) +267 ), +268 } +269 +270 QUERY_MODIFIER_PARSERS = { +271 **parser.Parser.QUERY_MODIFIER_PARSERS, +272 "distribute": lambda self: self._parse_sort(exp.Distribute, "DISTRIBUTE", "BY"), +273 "sort": lambda self: self._parse_sort(exp.Sort, "SORT", "BY"), +274 "cluster": lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"), +275 } +276 +277 def _parse_types( +278 self, check_func: bool = False, schema: bool = False +279 ) -> t.Optional[exp.Expression]: +280 """ +281 Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to +282 STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0: +283 +284 spark-sql (default)> select cast(1234 as varchar(2)); +285 23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support +286 char/varchar type and simply treats them as string type. Please use string type +287 directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString +288 to true, so that Spark treat them as string type as same as Spark 3.0 and earlier +289 +290 1234 +291 Time taken: 4.265 seconds, Fetched 1 row(s) +292 +293 This shows that Spark doesn't truncate the value into '12', which is inconsistent with +294 what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly. 
+295 +296 Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html +297 """ +298 this = super()._parse_types(check_func=check_func, schema=schema) +299 +300 if this and not schema: +301 return this.transform( +302 lambda node: node.replace(exp.DataType.build("text")) +303 if isinstance(node, exp.DataType) and node.is_type("char", "varchar") +304 else node, +305 copy=False, +306 ) +307 +308 return this
    -Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces
    -a parsed syntax tree.

    +Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

    Arguments:
    -• error_level: the desired error level.
    +• error_level: The desired error level.
         Default: ErrorLevel.IMMEDIATE
    -• error_message_context: determines the amount of context to capture from a
    +• error_message_context: Determines the amount of context to capture from a
         query string when displaying the error message (in number of characters).
    -    Default: 50.
    -• index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.
    -    Default: 0
    -• alias_post_tablesample: If the table alias comes after tablesample.
    -    Default: False
    +    Default: 100
     • max_errors: Maximum number of error messages to include in a raised ParseError.
         This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
    -• null_ordering: Indicates the default null ordering method to use if not explicitly set.
    -    Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
    -    Default: "nulls_are_small"
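These Parser options are forwarded by the top-level API, so they can be exercised without constructing a Parser directly. A hedged sketch of error_level and max_errors working together (the invalid SQL and the printed message are illustrative):

    import sqlglot
    from sqlglot import ErrorLevel, ParseError

    try:
        # With RAISE, up to max_errors parse errors are collected into one ParseError.
        sqlglot.parse_one(
            "SELECT * FROM t WHERE (",
            read="hive",
            error_level=ErrorLevel.RAISE,
            max_errors=3,
        )
    except ParseError as e:
        print(e)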
    @@ -1069,178 +1041,160 @@ Default: "nulls_are_small"
    -
    315    class Generator(generator.Generator):
    -316        LIMIT_FETCH = "LIMIT"
    -317        TABLESAMPLE_WITH_METHOD = False
    -318        TABLESAMPLE_SIZE_IS_PERCENT = True
    -319        JOIN_HINTS = False
    -320        TABLE_HINTS = False
    -321        INDEX_ON = "ON TABLE"
    -322
    -323        TYPE_MAPPING = {
    -324            **generator.Generator.TYPE_MAPPING,
    -325            exp.DataType.Type.TEXT: "STRING",
    -326            exp.DataType.Type.DATETIME: "TIMESTAMP",
    -327            exp.DataType.Type.VARBINARY: "BINARY",
    -328            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -329            exp.DataType.Type.BIT: "BOOLEAN",
    -330        }
    -331
    -332        TRANSFORMS = {
    -333            **generator.Generator.TRANSFORMS,
    -334            exp.Group: transforms.preprocess([transforms.unalias_group]),
    -335            exp.Select: transforms.preprocess(
    -336                [
    -337                    transforms.eliminate_qualify,
    -338                    transforms.eliminate_distinct_on,
    -339                    transforms.unnest_to_explode,
    -340                ]
    -341            ),
    -342            exp.Property: _property_sql,
    -343            exp.ApproxDistinct: approx_count_distinct_sql,
    -344            exp.ArrayConcat: rename_func("CONCAT"),
    -345            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
    -346            exp.ArraySize: rename_func("SIZE"),
    -347            exp.ArraySort: _array_sort_sql,
    -348            exp.With: no_recursive_cte_sql,
    -349            exp.DateAdd: _add_date_sql,
    -350            exp.DateDiff: _date_diff_sql,
    -351            exp.DateStrToDate: rename_func("TO_DATE"),
    -352            exp.DateSub: _add_date_sql,
    -353            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)",
    -354            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})",
    -355            exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
    -356            exp.FromBase64: rename_func("UNBASE64"),
    -357            exp.If: if_sql,
    -358            exp.ILike: no_ilike_sql,
    -359            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
    -360            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
    -361            exp.JSONFormat: _json_format_sql,
    -362            exp.Left: left_to_substring_sql,
    -363            exp.Map: var_map_sql,
    -364            exp.Max: max_or_greatest,
    -365            exp.Min: min_or_least,
    -366            exp.VarMap: var_map_sql,
    -367            exp.Create: create_with_partitions_sql,
    -368            exp.Quantile: rename_func("PERCENTILE"),
    -369            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
    -370            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
    -371            exp.RegexpSplit: rename_func("SPLIT"),
    -372            exp.Right: right_to_substring_sql,
    -373            exp.SafeDivide: no_safe_divide_sql,
    -374            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
    -375            exp.SetAgg: rename_func("COLLECT_SET"),
    -376            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
    -377            exp.StrPosition: strposition_to_locate_sql,
    -378            exp.StrToDate: _str_to_date_sql,
    -379            exp.StrToTime: _str_to_time_sql,
    -380            exp.StrToUnix: _str_to_unix_sql,
    -381            exp.StructExtract: struct_extract_sql,
    -382            exp.TimeStrToDate: rename_func("TO_DATE"),
    -383            exp.TimeStrToTime: timestrtotime_sql,
    -384            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    -385            exp.TimeToStr: _time_to_str,
    -386            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
    -387            exp.ToBase64: rename_func("BASE64"),
    -388            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
    -389            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -390            exp.TsOrDsToDate: _to_date_sql,
    -391            exp.TryCast: no_trycast_sql,
    -392            exp.UnixToStr: lambda self, e: self.func(
    -393                "FROM_UNIXTIME", e.this, _time_format(self, e)
    -394            ),
    -395            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
    -396            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
    -397            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
    -398            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
    -399            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
    -400            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
    -401            exp.LastDateOfMonth: rename_func("LAST_DAY"),
    -402            exp.National: lambda self, e: self.national_sql(e, prefix=""),
    -403        }
    -404
    -405        PROPERTIES_LOCATION = {
    -406            **generator.Generator.PROPERTIES_LOCATION,
    -407            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
    -408            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    -409            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -410        }
    -411
    -412        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    -413            return self.func(
    -414                "COLLECT_LIST",
    -415                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
    -416            )
    -417
    -418        def with_properties(self, properties: exp.Properties) -> str:
    -419            return self.properties(
    -420                properties,
    -421                prefix=self.seg("TBLPROPERTIES"),
    -422            )
    -423
    -424        def datatype_sql(self, expression: exp.DataType) -> str:
    -425            if (
    -426                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
    -427                and not expression.expressions
    -428            ):
    -429                expression = exp.DataType.build("text")
    -430            elif expression.this in exp.DataType.TEMPORAL_TYPES:
    -431                expression = exp.DataType.build(expression.this)
    -432
    -433            return super().datatype_sql(expression)
    -434
    -435        def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
    -436            return super().after_having_modifiers(expression) + [
    -437                self.sql(expression, "distribute"),
    -438                self.sql(expression, "sort"),
    -439                self.sql(expression, "cluster"),
    -440            ]
    +            
    310    class Generator(generator.Generator):
    +311        LIMIT_FETCH = "LIMIT"
    +312        TABLESAMPLE_WITH_METHOD = False
    +313        TABLESAMPLE_SIZE_IS_PERCENT = True
    +314        JOIN_HINTS = False
    +315        TABLE_HINTS = False
    +316        INDEX_ON = "ON TABLE"
    +317
    +318        TYPE_MAPPING = {
    +319            **generator.Generator.TYPE_MAPPING,
    +320            exp.DataType.Type.TEXT: "STRING",
    +321            exp.DataType.Type.DATETIME: "TIMESTAMP",
    +322            exp.DataType.Type.VARBINARY: "BINARY",
    +323            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +324            exp.DataType.Type.BIT: "BOOLEAN",
    +325        }
    +326
    +327        TRANSFORMS = {
    +328            **generator.Generator.TRANSFORMS,
    +329            exp.Group: transforms.preprocess([transforms.unalias_group]),
    +330            exp.Select: transforms.preprocess(
    +331                [
    +332                    transforms.eliminate_qualify,
    +333                    transforms.eliminate_distinct_on,
    +334                    transforms.unnest_to_explode,
    +335                ]
    +336            ),
    +337            exp.Property: _property_sql,
    +338            exp.ApproxDistinct: approx_count_distinct_sql,
    +339            exp.ArrayConcat: rename_func("CONCAT"),
    +340            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
    +341            exp.ArraySize: rename_func("SIZE"),
    +342            exp.ArraySort: _array_sort_sql,
    +343            exp.With: no_recursive_cte_sql,
    +344            exp.DateAdd: _add_date_sql,
    +345            exp.DateDiff: _date_diff_sql,
    +346            exp.DateStrToDate: rename_func("TO_DATE"),
    +347            exp.DateSub: _add_date_sql,
    +348            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)",
    +349            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})",
    +350            exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
    +351            exp.FromBase64: rename_func("UNBASE64"),
    +352            exp.If: if_sql,
    +353            exp.ILike: no_ilike_sql,
    +354            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
    +355            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
    +356            exp.JSONFormat: _json_format_sql,
    +357            exp.Left: left_to_substring_sql,
    +358            exp.Map: var_map_sql,
    +359            exp.Max: max_or_greatest,
    +360            exp.Min: min_or_least,
    +361            exp.VarMap: var_map_sql,
    +362            exp.Create: create_with_partitions_sql,
    +363            exp.Quantile: rename_func("PERCENTILE"),
    +364            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
    +365            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
    +366            exp.RegexpSplit: rename_func("SPLIT"),
    +367            exp.Right: right_to_substring_sql,
    +368            exp.SafeDivide: no_safe_divide_sql,
    +369            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
    +370            exp.SetAgg: rename_func("COLLECT_SET"),
    +371            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
    +372            exp.StrPosition: strposition_to_locate_sql,
    +373            exp.StrToDate: _str_to_date_sql,
    +374            exp.StrToTime: _str_to_time_sql,
    +375            exp.StrToUnix: _str_to_unix_sql,
    +376            exp.StructExtract: struct_extract_sql,
    +377            exp.TimeStrToDate: rename_func("TO_DATE"),
    +378            exp.TimeStrToTime: timestrtotime_sql,
    +379            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    +380            exp.TimeToStr: _time_to_str,
    +381            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
    +382            exp.ToBase64: rename_func("BASE64"),
    +383            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
    +384            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +385            exp.TsOrDsToDate: _to_date_sql,
    +386            exp.TryCast: no_trycast_sql,
    +387            exp.UnixToStr: lambda self, e: self.func(
    +388                "FROM_UNIXTIME", e.this, _time_format(self, e)
    +389            ),
    +390            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
    +391            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
    +392            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
    +393            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
    +394            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
    +395            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
    +396            exp.LastDateOfMonth: rename_func("LAST_DAY"),
    +397            exp.National: lambda self, e: self.national_sql(e, prefix=""),
    +398        }
    +399
    +400        PROPERTIES_LOCATION = {
    +401            **generator.Generator.PROPERTIES_LOCATION,
    +402            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
    +403            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    +404            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +405        }
    +406
    +407        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    +408            return self.func(
    +409                "COLLECT_LIST",
    +410                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
    +411            )
    +412
    +413        def with_properties(self, properties: exp.Properties) -> str:
    +414            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
    +415
    +416        def datatype_sql(self, expression: exp.DataType) -> str:
    +417            if (
    +418                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
    +419                and not expression.expressions
    +420            ):
    +421                expression = exp.DataType.build("text")
    +422            elif expression.this in exp.DataType.TEMPORAL_TYPES:
    +423                expression = exp.DataType.build(expression.this)
    +424
    +425            return super().datatype_sql(expression)
    +426
    +427        def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
    +428            return super().after_having_modifiers(expression) + [
    +429                self.sql(expression, "distribute"),
    +430                self.sql(expression, "sort"),
    +431                self.sql(expression, "cluster"),
    +432            ]
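after_having_modifiers above is the generation-side counterpart of the QUERY_MODIFIER_PARSERS registered on the Parser, and the TRANSFORMS table handles the function renames. A hedged sketch of both (outputs are approximate):

    import sqlglot

    # DISTRIBUTE BY / SORT BY are parsed into exp.Distribute / exp.Sort and re-emitted
    # by after_having_modifiers, so a Hive -> Hive round trip should be unchanged.
    print(sqlglot.transpile("SELECT x FROM t DISTRIBUTE BY x SORT BY x",
                            read="hive", write="hive")[0])

    # A couple of the TRANSFORMS in action when generating Hive from Presto SQL:
    print(sqlglot.transpile("SELECT APPROX_DISTINCT(x), CARDINALITY(y) FROM t",
                            read="presto", write="hive")[0])
    # Roughly: SELECT APPROX_COUNT_DISTINCT(x), SIZE(y) FROM t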
     
    -Generator interprets the given syntax tree and produces a SQL string as an output.

    +Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
      -
    • time_mapping (dict): the dictionary of custom time mappings in which the key -represents a python time format and the output the target time format
    • -
    • time_trie (trie): a trie of the time_mapping keys
    • -
    • pretty (bool): if set to True the returned string will be formatted. Default: False.
    • -
    • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    • -
    • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    • -
    • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    • -
    • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    • -
    • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    • -
    • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    • -
    • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    • -
    • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    • -
    • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    • -
    • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    • -
    • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    • -
    • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    • -
    • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    • -
    • normalize (bool): if set to True all identifiers will lower cased
    • -
    • string_escape (str): specifies a string escape character. Default: '.
    • -
    • identifier_escape (str): specifies an identifier escape character. Default: ".
    • -
    • pad (int): determines padding in a formatted string. Default: 2.
    • -
    • indent (int): determines the size of indentation in a formatted string. Default: 4.
    • -
    • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    • -
    • normalize_functions (str): normalize function names, "upper", "lower", or None -Default: "upper"
    • -
    • alias_post_tablesample (bool): if the table alias comes after tablesample -Default: False
    • -
    • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit -Default: False
    • -
    • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters -unsupported expressions. Default ErrorLevel.WARN.
    • -
    • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
    • -
    • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +
    • pretty: Whether or not to format the produced SQL string. +Default: False.
    • +
    • identify: Determines when an identifier should be quoted. Possible values are: +False (default): Never quote, except in cases where it's mandatory by the dialect. +True or 'always': Always quote. +'safe': Only quote identifiers that are case insensitive.
    • +
    • normalize: Whether or not to normalize identifiers to lowercase. +Default: False.
    • +
    • pad: Determines the pad size in a formatted string. +Default: 2.
    • +
    • indent: Determines the indentation size in a formatted string. +Default: 2.
    • +
    • normalize_functions: Whether or not to normalize all function names. Possible values are: +"upper" or True (default): Convert names to uppercase. +"lower": Convert names to lowercase. +False: Disables function name normalization.
    • +
    • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. +Default ErrorLevel.WARN.
    • +
    • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    • -
    • leading_comma (bool): if the the comma is leading or trailing in select statements +
    • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. +This is only relevant when generating in pretty mode. Default: False
    • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true @@ -1263,11 +1217,11 @@ Default: True
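
    The options above are easiest to see in action through sqlglot's public entry points. The following is a minimal, hedged sketch: the table and column names are made up, the exact formatting of the output may differ between sqlglot versions, and it relies on transpile() and Expression.sql() forwarding keyword arguments to the active dialect's Generator.

        import sqlglot

        sql = "SELECT a, b FROM my_table WHERE a > 1"

        # pretty + identify: format the output and quote every identifier
        # (the Hive dialect quotes identifiers with backticks).
        print(sqlglot.transpile(sql, write="hive", pretty=True, identify=True)[0])

        # normalize_functions: render function names in lowercase instead of uppercase.
        print(sqlglot.parse_one("SELECT SUM(x) FROM t").sql(normalize_functions="lower"))
        # e.g. SELECT sum(x) FROM t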
    @@ -1263,11 +1217,11 @@ Default: True
    -412        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    -413            return self.func(
    -414                "COLLECT_LIST",
    -415                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
    -416            )
    +407        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    +408            return self.func(
    +409                "COLLECT_LIST",
    +410                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
    +411            )
     
    @@ -1285,11 +1239,8 @@ Default: True
    -418        def with_properties(self, properties: exp.Properties) -> str:
    -419            return self.properties(
    -420                properties,
    -421                prefix=self.seg("TBLPROPERTIES"),
    -422            )
    +413        def with_properties(self, properties: exp.Properties) -> str:
    +414            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
     
    @@ -1307,16 +1258,16 @@ Default: True
    -424        def datatype_sql(self, expression: exp.DataType) -> str:
    -425            if (
    -426                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
    -427                and not expression.expressions
    -428            ):
    -429                expression = exp.DataType.build("text")
    -430            elif expression.this in exp.DataType.TEMPORAL_TYPES:
    -431                expression = exp.DataType.build(expression.this)
    -432
    -433            return super().datatype_sql(expression)
    +416        def datatype_sql(self, expression: exp.DataType) -> str:
    +417            if (
    +418                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
    +419                and not expression.expressions
    +420            ):
    +421                expression = exp.DataType.build("text")
    +422            elif expression.this in exp.DataType.TEMPORAL_TYPES:
    +423                expression = exp.DataType.build(expression.this)
    +424
    +425            return super().datatype_sql(expression)
     
    @@ -1334,12 +1285,12 @@ Default: True
    -435        def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
    -436            return super().after_having_modifiers(expression) + [
    -437                self.sql(expression, "distribute"),
    -438                self.sql(expression, "sort"),
    -439                self.sql(expression, "cluster"),
    -440            ]
    +427        def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
    +428            return super().after_having_modifiers(expression) + [
    +429                self.sql(expression, "distribute"),
    +430                self.sql(expression, "sort"),
    +431                self.sql(expression, "cluster"),
    +432            ]
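
    To make the override above concrete, the sketch below round-trips a Hive query that uses these clauses (invented table and column names; the output shown is approximate and may differ by sqlglot version):

        import sqlglot

        # DISTRIBUTE BY / SORT BY / CLUSTER BY are emitted by the
        # after_having_modifiers() override shown in this hunk.
        q = "SELECT x, y FROM t DISTRIBUTE BY x SORT BY y"
        print(sqlglot.parse_one(q, read="hive").sql(dialect="hive"))
        # e.g. SELECT x, y FROM t DISTRIBUTE BY x SORT BY y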
     
    @@ -1375,6 +1326,7 @@ Default: True
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +createable_sql
    create_sql
    clone_sql
    describe_sql
    @@ -1455,9 +1407,11 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +offset_limit_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -1482,7 +1436,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -concat_sql
    +safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -1533,6 +1487,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -1581,6 +1536,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +oncluster_sql
    diff --git a/docs/sqlglot/dialects/mysql.html b/docs/sqlglot/dialects/mysql.html index 42f39e6..489aa39 100644 --- a/docs/sqlglot/dialects/mysql.html +++ b/docs/sqlglot/dialects/mysql.html @@ -176,10 +176,10 @@ 94 95 96class MySQL(Dialect): - 97 time_format = "'%Y-%m-%d %T'" + 97 TIME_FORMAT = "'%Y-%m-%d %T'" 98 99 # https://prestodb.io/docs/current/functions/datetime.html#mysql-date-functions -100 time_mapping = { +100 TIME_MAPPING = { 101 "%M": "%B", 102 "%c": "%-m", 103 "%e": "%-d", @@ -210,361 +210,365 @@ 128 "MEDIUMBLOB": TokenType.MEDIUMBLOB, 129 "MEDIUMTEXT": TokenType.MEDIUMTEXT, 130 "SEPARATOR": TokenType.SEPARATOR, -131 "START": TokenType.BEGIN, -132 "_ARMSCII8": TokenType.INTRODUCER, -133 "_ASCII": TokenType.INTRODUCER, -134 "_BIG5": TokenType.INTRODUCER, -135 "_BINARY": TokenType.INTRODUCER, -136 "_CP1250": TokenType.INTRODUCER, -137 "_CP1251": TokenType.INTRODUCER, -138 "_CP1256": TokenType.INTRODUCER, -139 "_CP1257": TokenType.INTRODUCER, -140 "_CP850": TokenType.INTRODUCER, -141 "_CP852": TokenType.INTRODUCER, -142 "_CP866": TokenType.INTRODUCER, -143 "_CP932": TokenType.INTRODUCER, -144 "_DEC8": TokenType.INTRODUCER, -145 "_EUCJPMS": TokenType.INTRODUCER, -146 "_EUCKR": TokenType.INTRODUCER, -147 "_GB18030": TokenType.INTRODUCER, -148 "_GB2312": TokenType.INTRODUCER, -149 "_GBK": TokenType.INTRODUCER, -150 "_GEOSTD8": TokenType.INTRODUCER, -151 "_GREEK": TokenType.INTRODUCER, -152 "_HEBREW": TokenType.INTRODUCER, -153 "_HP8": TokenType.INTRODUCER, -154 "_KEYBCS2": TokenType.INTRODUCER, -155 "_KOI8R": TokenType.INTRODUCER, -156 "_KOI8U": TokenType.INTRODUCER, -157 "_LATIN1": TokenType.INTRODUCER, -158 "_LATIN2": TokenType.INTRODUCER, -159 "_LATIN5": TokenType.INTRODUCER, -160 "_LATIN7": TokenType.INTRODUCER, -161 "_MACCE": TokenType.INTRODUCER, -162 "_MACROMAN": TokenType.INTRODUCER, -163 "_SJIS": TokenType.INTRODUCER, -164 "_SWE7": TokenType.INTRODUCER, -165 "_TIS620": TokenType.INTRODUCER, -166 "_UCS2": TokenType.INTRODUCER, -167 "_UJIS": TokenType.INTRODUCER, -168 # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html -169 "_UTF8": TokenType.INTRODUCER, -170 "_UTF16": TokenType.INTRODUCER, -171 "_UTF16LE": TokenType.INTRODUCER, -172 "_UTF32": TokenType.INTRODUCER, -173 "_UTF8MB3": TokenType.INTRODUCER, -174 "_UTF8MB4": TokenType.INTRODUCER, -175 "@@": TokenType.SESSION_PARAMETER, -176 } -177 -178 COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW} -179 -180 class Parser(parser.Parser): -181 FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS, TokenType.SCHEMA, TokenType.DATABASE} -182 -183 FUNCTIONS = { -184 **parser.Parser.FUNCTIONS, -185 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), -186 "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "mysql"), -187 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), -188 "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)), -189 "LOCATE": locate_to_strposition, -190 "STR_TO_DATE": _str_to_date, -191 } -192 -193 FUNCTION_PARSERS = { -194 **parser.Parser.FUNCTION_PARSERS, -195 "GROUP_CONCAT": lambda self: self.expression( -196 exp.GroupConcat, -197 this=self._parse_lambda(), -198 separator=self._match(TokenType.SEPARATOR) and self._parse_field(), -199 ), -200 } -201 -202 STATEMENT_PARSERS = { -203 **parser.Parser.STATEMENT_PARSERS, -204 TokenType.SHOW: lambda self: self._parse_show(), -205 } -206 -207 SHOW_PARSERS = { -208 "BINARY LOGS": _show_parser("BINARY LOGS"), -209 "MASTER LOGS": _show_parser("BINARY LOGS"), -210 "BINLOG EVENTS": _show_parser("BINLOG EVENTS"), -211 "CHARACTER 
SET": _show_parser("CHARACTER SET"), -212 "CHARSET": _show_parser("CHARACTER SET"), -213 "COLLATION": _show_parser("COLLATION"), -214 "FULL COLUMNS": _show_parser("COLUMNS", target="FROM", full=True), -215 "COLUMNS": _show_parser("COLUMNS", target="FROM"), -216 "CREATE DATABASE": _show_parser("CREATE DATABASE", target=True), -217 "CREATE EVENT": _show_parser("CREATE EVENT", target=True), -218 "CREATE FUNCTION": _show_parser("CREATE FUNCTION", target=True), -219 "CREATE PROCEDURE": _show_parser("CREATE PROCEDURE", target=True), -220 "CREATE TABLE": _show_parser("CREATE TABLE", target=True), -221 "CREATE TRIGGER": _show_parser("CREATE TRIGGER", target=True), -222 "CREATE VIEW": _show_parser("CREATE VIEW", target=True), -223 "DATABASES": _show_parser("DATABASES"), -224 "ENGINE": _show_parser("ENGINE", target=True), -225 "STORAGE ENGINES": _show_parser("ENGINES"), -226 "ENGINES": _show_parser("ENGINES"), -227 "ERRORS": _show_parser("ERRORS"), -228 "EVENTS": _show_parser("EVENTS"), -229 "FUNCTION CODE": _show_parser("FUNCTION CODE", target=True), -230 "FUNCTION STATUS": _show_parser("FUNCTION STATUS"), -231 "GRANTS": _show_parser("GRANTS", target="FOR"), -232 "INDEX": _show_parser("INDEX", target="FROM"), -233 "MASTER STATUS": _show_parser("MASTER STATUS"), -234 "OPEN TABLES": _show_parser("OPEN TABLES"), -235 "PLUGINS": _show_parser("PLUGINS"), -236 "PROCEDURE CODE": _show_parser("PROCEDURE CODE", target=True), -237 "PROCEDURE STATUS": _show_parser("PROCEDURE STATUS"), -238 "PRIVILEGES": _show_parser("PRIVILEGES"), -239 "FULL PROCESSLIST": _show_parser("PROCESSLIST", full=True), -240 "PROCESSLIST": _show_parser("PROCESSLIST"), -241 "PROFILE": _show_parser("PROFILE"), -242 "PROFILES": _show_parser("PROFILES"), -243 "RELAYLOG EVENTS": _show_parser("RELAYLOG EVENTS"), -244 "REPLICAS": _show_parser("REPLICAS"), -245 "SLAVE HOSTS": _show_parser("REPLICAS"), -246 "REPLICA STATUS": _show_parser("REPLICA STATUS"), -247 "SLAVE STATUS": _show_parser("REPLICA STATUS"), -248 "GLOBAL STATUS": _show_parser("STATUS", global_=True), -249 "SESSION STATUS": _show_parser("STATUS"), -250 "STATUS": _show_parser("STATUS"), -251 "TABLE STATUS": _show_parser("TABLE STATUS"), -252 "FULL TABLES": _show_parser("TABLES", full=True), -253 "TABLES": _show_parser("TABLES"), -254 "TRIGGERS": _show_parser("TRIGGERS"), -255 "GLOBAL VARIABLES": _show_parser("VARIABLES", global_=True), -256 "SESSION VARIABLES": _show_parser("VARIABLES"), -257 "VARIABLES": _show_parser("VARIABLES"), -258 "WARNINGS": _show_parser("WARNINGS"), -259 } -260 -261 SET_PARSERS = { -262 **parser.Parser.SET_PARSERS, -263 "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"), -264 "PERSIST_ONLY": lambda self: self._parse_set_item_assignment("PERSIST_ONLY"), -265 "CHARACTER SET": lambda self: self._parse_set_item_charset("CHARACTER SET"), -266 "CHARSET": lambda self: self._parse_set_item_charset("CHARACTER SET"), -267 "NAMES": lambda self: self._parse_set_item_names(), -268 } -269 -270 PROFILE_TYPES = { -271 "ALL", -272 "BLOCK IO", -273 "CONTEXT SWITCHES", -274 "CPU", -275 "IPC", -276 "MEMORY", -277 "PAGE FAULTS", -278 "SOURCE", -279 "SWAPS", -280 } -281 -282 LOG_DEFAULTS_TO_LN = True -283 -284 def _parse_show_mysql( -285 self, -286 this: str, -287 target: bool | str = False, -288 full: t.Optional[bool] = None, -289 global_: t.Optional[bool] = None, -290 ) -> exp.Show: -291 if target: -292 if isinstance(target, str): -293 self._match_text_seq(target) -294 target_id = self._parse_id_var() -295 else: -296 target_id = None -297 -298 log = 
self._parse_string() if self._match_text_seq("IN") else None -299 -300 if this in {"BINLOG EVENTS", "RELAYLOG EVENTS"}: -301 position = self._parse_number() if self._match_text_seq("FROM") else None -302 db = None -303 else: -304 position = None -305 db = None -306 -307 if self._match(TokenType.FROM): -308 db = self._parse_id_var() -309 elif self._match(TokenType.DOT): -310 db = target_id -311 target_id = self._parse_id_var() -312 -313 channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None -314 -315 like = self._parse_string() if self._match_text_seq("LIKE") else None -316 where = self._parse_where() +131 "ENUM": TokenType.ENUM, +132 "START": TokenType.BEGIN, +133 "_ARMSCII8": TokenType.INTRODUCER, +134 "_ASCII": TokenType.INTRODUCER, +135 "_BIG5": TokenType.INTRODUCER, +136 "_BINARY": TokenType.INTRODUCER, +137 "_CP1250": TokenType.INTRODUCER, +138 "_CP1251": TokenType.INTRODUCER, +139 "_CP1256": TokenType.INTRODUCER, +140 "_CP1257": TokenType.INTRODUCER, +141 "_CP850": TokenType.INTRODUCER, +142 "_CP852": TokenType.INTRODUCER, +143 "_CP866": TokenType.INTRODUCER, +144 "_CP932": TokenType.INTRODUCER, +145 "_DEC8": TokenType.INTRODUCER, +146 "_EUCJPMS": TokenType.INTRODUCER, +147 "_EUCKR": TokenType.INTRODUCER, +148 "_GB18030": TokenType.INTRODUCER, +149 "_GB2312": TokenType.INTRODUCER, +150 "_GBK": TokenType.INTRODUCER, +151 "_GEOSTD8": TokenType.INTRODUCER, +152 "_GREEK": TokenType.INTRODUCER, +153 "_HEBREW": TokenType.INTRODUCER, +154 "_HP8": TokenType.INTRODUCER, +155 "_KEYBCS2": TokenType.INTRODUCER, +156 "_KOI8R": TokenType.INTRODUCER, +157 "_KOI8U": TokenType.INTRODUCER, +158 "_LATIN1": TokenType.INTRODUCER, +159 "_LATIN2": TokenType.INTRODUCER, +160 "_LATIN5": TokenType.INTRODUCER, +161 "_LATIN7": TokenType.INTRODUCER, +162 "_MACCE": TokenType.INTRODUCER, +163 "_MACROMAN": TokenType.INTRODUCER, +164 "_SJIS": TokenType.INTRODUCER, +165 "_SWE7": TokenType.INTRODUCER, +166 "_TIS620": TokenType.INTRODUCER, +167 "_UCS2": TokenType.INTRODUCER, +168 "_UJIS": TokenType.INTRODUCER, +169 # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html +170 "_UTF8": TokenType.INTRODUCER, +171 "_UTF16": TokenType.INTRODUCER, +172 "_UTF16LE": TokenType.INTRODUCER, +173 "_UTF32": TokenType.INTRODUCER, +174 "_UTF8MB3": TokenType.INTRODUCER, +175 "_UTF8MB4": TokenType.INTRODUCER, +176 "@@": TokenType.SESSION_PARAMETER, +177 } +178 +179 COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW} +180 +181 class Parser(parser.Parser): +182 FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS, TokenType.SCHEMA, TokenType.DATABASE} +183 +184 FUNCTIONS = { +185 **parser.Parser.FUNCTIONS, +186 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), +187 "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "mysql"), +188 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), +189 "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)), +190 "LOCATE": locate_to_strposition, +191 "STR_TO_DATE": _str_to_date, +192 } +193 +194 FUNCTION_PARSERS = { +195 **parser.Parser.FUNCTION_PARSERS, +196 "GROUP_CONCAT": lambda self: self.expression( +197 exp.GroupConcat, +198 this=self._parse_lambda(), +199 separator=self._match(TokenType.SEPARATOR) and self._parse_field(), +200 ), +201 } +202 +203 STATEMENT_PARSERS = { +204 **parser.Parser.STATEMENT_PARSERS, +205 TokenType.SHOW: lambda self: self._parse_show(), +206 } +207 +208 SHOW_PARSERS = { +209 "BINARY LOGS": _show_parser("BINARY LOGS"), +210 "MASTER LOGS": _show_parser("BINARY LOGS"), +211 "BINLOG EVENTS": 
_show_parser("BINLOG EVENTS"), +212 "CHARACTER SET": _show_parser("CHARACTER SET"), +213 "CHARSET": _show_parser("CHARACTER SET"), +214 "COLLATION": _show_parser("COLLATION"), +215 "FULL COLUMNS": _show_parser("COLUMNS", target="FROM", full=True), +216 "COLUMNS": _show_parser("COLUMNS", target="FROM"), +217 "CREATE DATABASE": _show_parser("CREATE DATABASE", target=True), +218 "CREATE EVENT": _show_parser("CREATE EVENT", target=True), +219 "CREATE FUNCTION": _show_parser("CREATE FUNCTION", target=True), +220 "CREATE PROCEDURE": _show_parser("CREATE PROCEDURE", target=True), +221 "CREATE TABLE": _show_parser("CREATE TABLE", target=True), +222 "CREATE TRIGGER": _show_parser("CREATE TRIGGER", target=True), +223 "CREATE VIEW": _show_parser("CREATE VIEW", target=True), +224 "DATABASES": _show_parser("DATABASES"), +225 "ENGINE": _show_parser("ENGINE", target=True), +226 "STORAGE ENGINES": _show_parser("ENGINES"), +227 "ENGINES": _show_parser("ENGINES"), +228 "ERRORS": _show_parser("ERRORS"), +229 "EVENTS": _show_parser("EVENTS"), +230 "FUNCTION CODE": _show_parser("FUNCTION CODE", target=True), +231 "FUNCTION STATUS": _show_parser("FUNCTION STATUS"), +232 "GRANTS": _show_parser("GRANTS", target="FOR"), +233 "INDEX": _show_parser("INDEX", target="FROM"), +234 "MASTER STATUS": _show_parser("MASTER STATUS"), +235 "OPEN TABLES": _show_parser("OPEN TABLES"), +236 "PLUGINS": _show_parser("PLUGINS"), +237 "PROCEDURE CODE": _show_parser("PROCEDURE CODE", target=True), +238 "PROCEDURE STATUS": _show_parser("PROCEDURE STATUS"), +239 "PRIVILEGES": _show_parser("PRIVILEGES"), +240 "FULL PROCESSLIST": _show_parser("PROCESSLIST", full=True), +241 "PROCESSLIST": _show_parser("PROCESSLIST"), +242 "PROFILE": _show_parser("PROFILE"), +243 "PROFILES": _show_parser("PROFILES"), +244 "RELAYLOG EVENTS": _show_parser("RELAYLOG EVENTS"), +245 "REPLICAS": _show_parser("REPLICAS"), +246 "SLAVE HOSTS": _show_parser("REPLICAS"), +247 "REPLICA STATUS": _show_parser("REPLICA STATUS"), +248 "SLAVE STATUS": _show_parser("REPLICA STATUS"), +249 "GLOBAL STATUS": _show_parser("STATUS", global_=True), +250 "SESSION STATUS": _show_parser("STATUS"), +251 "STATUS": _show_parser("STATUS"), +252 "TABLE STATUS": _show_parser("TABLE STATUS"), +253 "FULL TABLES": _show_parser("TABLES", full=True), +254 "TABLES": _show_parser("TABLES"), +255 "TRIGGERS": _show_parser("TRIGGERS"), +256 "GLOBAL VARIABLES": _show_parser("VARIABLES", global_=True), +257 "SESSION VARIABLES": _show_parser("VARIABLES"), +258 "VARIABLES": _show_parser("VARIABLES"), +259 "WARNINGS": _show_parser("WARNINGS"), +260 } +261 +262 SET_PARSERS = { +263 **parser.Parser.SET_PARSERS, +264 "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"), +265 "PERSIST_ONLY": lambda self: self._parse_set_item_assignment("PERSIST_ONLY"), +266 "CHARACTER SET": lambda self: self._parse_set_item_charset("CHARACTER SET"), +267 "CHARSET": lambda self: self._parse_set_item_charset("CHARACTER SET"), +268 "NAMES": lambda self: self._parse_set_item_names(), +269 } +270 +271 PROFILE_TYPES = { +272 "ALL", +273 "BLOCK IO", +274 "CONTEXT SWITCHES", +275 "CPU", +276 "IPC", +277 "MEMORY", +278 "PAGE FAULTS", +279 "SOURCE", +280 "SWAPS", +281 } +282 +283 TYPE_TOKENS = { +284 *parser.Parser.TYPE_TOKENS, +285 TokenType.SET, +286 } +287 +288 ENUM_TYPE_TOKENS = { +289 *parser.Parser.ENUM_TYPE_TOKENS, +290 TokenType.SET, +291 } +292 +293 LOG_DEFAULTS_TO_LN = True +294 +295 def _parse_show_mysql( +296 self, +297 this: str, +298 target: bool | str = False, +299 full: t.Optional[bool] = None, +300 
global_: t.Optional[bool] = None, +301 ) -> exp.Show: +302 if target: +303 if isinstance(target, str): +304 self._match_text_seq(target) +305 target_id = self._parse_id_var() +306 else: +307 target_id = None +308 +309 log = self._parse_string() if self._match_text_seq("IN") else None +310 +311 if this in {"BINLOG EVENTS", "RELAYLOG EVENTS"}: +312 position = self._parse_number() if self._match_text_seq("FROM") else None +313 db = None +314 else: +315 position = None +316 db = None 317 -318 if this == "PROFILE": -319 types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES)) -320 query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None -321 offset = self._parse_number() if self._match_text_seq("OFFSET") else None -322 limit = self._parse_number() if self._match_text_seq("LIMIT") else None -323 else: -324 types, query = None, None -325 offset, limit = self._parse_oldstyle_limit() -326 -327 mutex = True if self._match_text_seq("MUTEX") else None -328 mutex = False if self._match_text_seq("STATUS") else mutex -329 -330 return self.expression( -331 exp.Show, -332 this=this, -333 target=target_id, -334 full=full, -335 log=log, -336 position=position, -337 db=db, -338 channel=channel, -339 like=like, -340 where=where, -341 types=types, -342 query=query, -343 offset=offset, -344 limit=limit, -345 mutex=mutex, -346 **{"global": global_}, # type: ignore -347 ) -348 -349 def _parse_oldstyle_limit( -350 self, -351 ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]: -352 limit = None -353 offset = None -354 if self._match_text_seq("LIMIT"): -355 parts = self._parse_csv(self._parse_number) -356 if len(parts) == 1: -357 limit = parts[0] -358 elif len(parts) == 2: -359 limit = parts[1] -360 offset = parts[0] -361 -362 return offset, limit -363 -364 def _parse_set_item_charset(self, kind: str) -> exp.Expression: -365 this = self._parse_string() or self._parse_id_var() -366 return self.expression(exp.SetItem, this=this, kind=kind) -367 -368 def _parse_set_item_names(self) -> exp.Expression: -369 charset = self._parse_string() or self._parse_id_var() -370 if self._match_text_seq("COLLATE"): -371 collate = self._parse_string() or self._parse_id_var() -372 else: -373 collate = None +318 if self._match(TokenType.FROM): +319 db = self._parse_id_var() +320 elif self._match(TokenType.DOT): +321 db = target_id +322 target_id = self._parse_id_var() +323 +324 channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None +325 +326 like = self._parse_string() if self._match_text_seq("LIKE") else None +327 where = self._parse_where() +328 +329 if this == "PROFILE": +330 types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES)) +331 query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None +332 offset = self._parse_number() if self._match_text_seq("OFFSET") else None +333 limit = self._parse_number() if self._match_text_seq("LIMIT") else None +334 else: +335 types, query = None, None +336 offset, limit = self._parse_oldstyle_limit() +337 +338 mutex = True if self._match_text_seq("MUTEX") else None +339 mutex = False if self._match_text_seq("STATUS") else mutex +340 +341 return self.expression( +342 exp.Show, +343 this=this, +344 target=target_id, +345 full=full, +346 log=log, +347 position=position, +348 db=db, +349 channel=channel, +350 like=like, +351 where=where, +352 types=types, +353 query=query, +354 offset=offset, +355 limit=limit, +356 mutex=mutex, +357 **{"global": global_}, # type: 
ignore +358 ) +359 +360 def _parse_oldstyle_limit( +361 self, +362 ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]: +363 limit = None +364 offset = None +365 if self._match_text_seq("LIMIT"): +366 parts = self._parse_csv(self._parse_number) +367 if len(parts) == 1: +368 limit = parts[0] +369 elif len(parts) == 2: +370 limit = parts[1] +371 offset = parts[0] +372 +373 return offset, limit 374 -375 return self.expression( -376 exp.SetItem, -377 this=charset, -378 collate=collate, -379 kind="NAMES", -380 ) -381 -382 class Generator(generator.Generator): -383 LOCKING_READS_SUPPORTED = True -384 NULL_ORDERING_SUPPORTED = False -385 JOIN_HINTS = False -386 TABLE_HINTS = False +375 def _parse_set_item_charset(self, kind: str) -> exp.Expression: +376 this = self._parse_string() or self._parse_id_var() +377 return self.expression(exp.SetItem, this=this, kind=kind) +378 +379 def _parse_set_item_names(self) -> exp.Expression: +380 charset = self._parse_string() or self._parse_id_var() +381 if self._match_text_seq("COLLATE"): +382 collate = self._parse_string() or self._parse_id_var() +383 else: +384 collate = None +385 +386 return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES") 387 -388 TRANSFORMS = { -389 **generator.Generator.TRANSFORMS, -390 exp.CurrentDate: no_paren_current_date_sql, -391 exp.DateDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression), -392 exp.DateAdd: _date_add_sql("ADD"), -393 exp.DateStrToDate: datestrtodate_sql, -394 exp.DateSub: _date_add_sql("SUB"), -395 exp.DateTrunc: _date_trunc_sql, -396 exp.DayOfMonth: rename_func("DAYOFMONTH"), -397 exp.DayOfWeek: rename_func("DAYOFWEEK"), -398 exp.DayOfYear: rename_func("DAYOFYEAR"), -399 exp.GroupConcat: lambda self, e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""", -400 exp.ILike: no_ilike_sql, -401 exp.JSONExtractScalar: arrow_json_extract_scalar_sql, -402 exp.Max: max_or_greatest, -403 exp.Min: min_or_least, -404 exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"), -405 exp.NullSafeNEQ: lambda self, e: self.not_sql(self.binary(e, "<=>")), -406 exp.Pivot: no_pivot_sql, -407 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), -408 exp.StrPosition: strposition_to_locate_sql, -409 exp.StrToDate: _str_to_date_sql, -410 exp.StrToTime: _str_to_date_sql, -411 exp.TableSample: no_tablesample_sql, -412 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), -413 exp.TimeToStr: lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)), -414 exp.Trim: _trim_sql, -415 exp.TryCast: no_trycast_sql, -416 exp.WeekOfYear: rename_func("WEEKOFYEAR"), -417 } -418 -419 TYPE_MAPPING = generator.Generator.TYPE_MAPPING.copy() -420 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT) -421 TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT) -422 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB) -423 TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB) +388 class Generator(generator.Generator): +389 LOCKING_READS_SUPPORTED = True +390 NULL_ORDERING_SUPPORTED = False +391 JOIN_HINTS = False +392 TABLE_HINTS = False +393 +394 TRANSFORMS = { +395 **generator.Generator.TRANSFORMS, +396 exp.CurrentDate: no_paren_current_date_sql, +397 exp.DateDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression), +398 exp.DateAdd: _date_add_sql("ADD"), +399 exp.DateStrToDate: datestrtodate_sql, +400 exp.DateSub: _date_add_sql("SUB"), +401 exp.DateTrunc: _date_trunc_sql, +402 exp.DayOfMonth: rename_func("DAYOFMONTH"), +403 exp.DayOfWeek: rename_func("DAYOFWEEK"), 
+404 exp.DayOfYear: rename_func("DAYOFYEAR"), +405 exp.GroupConcat: lambda self, e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""", +406 exp.ILike: no_ilike_sql, +407 exp.JSONExtractScalar: arrow_json_extract_scalar_sql, +408 exp.Max: max_or_greatest, +409 exp.Min: min_or_least, +410 exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"), +411 exp.NullSafeNEQ: lambda self, e: self.not_sql(self.binary(e, "<=>")), +412 exp.Pivot: no_pivot_sql, +413 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), +414 exp.StrPosition: strposition_to_locate_sql, +415 exp.StrToDate: _str_to_date_sql, +416 exp.StrToTime: _str_to_date_sql, +417 exp.TableSample: no_tablesample_sql, +418 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), +419 exp.TimeToStr: lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)), +420 exp.Trim: _trim_sql, +421 exp.TryCast: no_trycast_sql, +422 exp.WeekOfYear: rename_func("WEEKOFYEAR"), +423 } 424 -425 PROPERTIES_LOCATION = { -426 **generator.Generator.PROPERTIES_LOCATION, -427 exp.TransientProperty: exp.Properties.Location.UNSUPPORTED, -428 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -429 } +425 TYPE_MAPPING = generator.Generator.TYPE_MAPPING.copy() +426 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT) +427 TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT) +428 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB) +429 TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB) 430 -431 LIMIT_FETCH = "LIMIT" -432 -433 def show_sql(self, expression: exp.Show) -> str: -434 this = f" {expression.name}" -435 full = " FULL" if expression.args.get("full") else "" -436 global_ = " GLOBAL" if expression.args.get("global") else "" -437 -438 target = self.sql(expression, "target") -439 target = f" {target}" if target else "" -440 if expression.name in {"COLUMNS", "INDEX"}: -441 target = f" FROM{target}" -442 elif expression.name == "GRANTS": -443 target = f" FOR{target}" -444 -445 db = self._prefixed_sql("FROM", expression, "db") -446 -447 like = self._prefixed_sql("LIKE", expression, "like") -448 where = self.sql(expression, "where") -449 -450 types = self.expressions(expression, key="types") -451 types = f" {types}" if types else types -452 query = self._prefixed_sql("FOR QUERY", expression, "query") -453 -454 if expression.name == "PROFILE": -455 offset = self._prefixed_sql("OFFSET", expression, "offset") -456 limit = self._prefixed_sql("LIMIT", expression, "limit") -457 else: -458 offset = "" -459 limit = self._oldstyle_limit_sql(expression) -460 -461 log = self._prefixed_sql("IN", expression, "log") -462 position = self._prefixed_sql("FROM", expression, "position") -463 -464 channel = self._prefixed_sql("FOR CHANNEL", expression, "channel") -465 -466 if expression.name == "ENGINE": -467 mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS" -468 else: -469 mutex_or_status = "" -470 -471 return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}" -472 -473 def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str: -474 sql = self.sql(expression, arg) -475 if not sql: -476 return "" -477 return f" {prefix} {sql}" +431 PROPERTIES_LOCATION = { +432 **generator.Generator.PROPERTIES_LOCATION, +433 exp.TransientProperty: exp.Properties.Location.UNSUPPORTED, +434 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +435 } +436 +437 LIMIT_FETCH = "LIMIT" +438 +439 def show_sql(self, expression: exp.Show) -> str: 
+440 this = f" {expression.name}" +441 full = " FULL" if expression.args.get("full") else "" +442 global_ = " GLOBAL" if expression.args.get("global") else "" +443 +444 target = self.sql(expression, "target") +445 target = f" {target}" if target else "" +446 if expression.name in {"COLUMNS", "INDEX"}: +447 target = f" FROM{target}" +448 elif expression.name == "GRANTS": +449 target = f" FOR{target}" +450 +451 db = self._prefixed_sql("FROM", expression, "db") +452 +453 like = self._prefixed_sql("LIKE", expression, "like") +454 where = self.sql(expression, "where") +455 +456 types = self.expressions(expression, key="types") +457 types = f" {types}" if types else types +458 query = self._prefixed_sql("FOR QUERY", expression, "query") +459 +460 if expression.name == "PROFILE": +461 offset = self._prefixed_sql("OFFSET", expression, "offset") +462 limit = self._prefixed_sql("LIMIT", expression, "limit") +463 else: +464 offset = "" +465 limit = self._oldstyle_limit_sql(expression) +466 +467 log = self._prefixed_sql("IN", expression, "log") +468 position = self._prefixed_sql("FROM", expression, "position") +469 +470 channel = self._prefixed_sql("FOR CHANNEL", expression, "channel") +471 +472 if expression.name == "ENGINE": +473 mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS" +474 else: +475 mutex_or_status = "" +476 +477 return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}" 478 -479 def _oldstyle_limit_sql(self, expression: exp.Show) -> str: -480 limit = self.sql(expression, "limit") -481 offset = self.sql(expression, "offset") -482 if limit: -483 limit_offset = f"{offset}, {limit}" if offset else limit -484 return f" LIMIT {limit_offset}" -485 return "" +479 def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str: +480 sql = self.sql(expression, arg) +481 return f" {prefix} {sql}" if sql else "" +482 +483 def _oldstyle_limit_sql(self, expression: exp.Show) -> str: +484 limit = self.sql(expression, "limit") +485 offset = self.sql(expression, "offset") +486 if limit: +487 limit_offset = f"{offset}, {limit}" if offset else limit +488 return f" LIMIT {limit_offset}" +489 return ""
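
    The MySQL SHOW handling above is easiest to see as a round trip. A minimal, hedged sketch (database, table and pattern names are invented; the exact output may vary by sqlglot version):

        import sqlglot

        # SHOW statements are parsed via SHOW_PARSERS/_parse_show_mysql and re-rendered
        # by show_sql(); old-style "LIMIT offset, count" goes through
        # _parse_oldstyle_limit()/_oldstyle_limit_sql().
        show = sqlglot.parse_one("SHOW FULL TABLES FROM mydb LIKE 't%'", read="mysql")
        print(show.sql(dialect="mysql"))      # e.g. SHOW FULL TABLES FROM mydb LIKE 't%'

        warnings = sqlglot.parse_one("SHOW WARNINGS LIMIT 5, 10", read="mysql")
        print(warnings.sql(dialect="mysql"))  # e.g. SHOW WARNINGS LIMIT 5, 10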
    @@ -581,10 +585,10 @@
     97class MySQL(Dialect):
    - 98    time_format = "'%Y-%m-%d %T'"
    + 98    TIME_FORMAT = "'%Y-%m-%d %T'"
      99
     100    # https://prestodb.io/docs/current/functions/datetime.html#mysql-date-functions
    -101    time_mapping = {
    +101    TIME_MAPPING = {
     102        "%M": "%B",
     103        "%c": "%-m",
     104        "%e": "%-d",
    @@ -615,361 +619,365 @@
     129            "MEDIUMBLOB": TokenType.MEDIUMBLOB,
     130            "MEDIUMTEXT": TokenType.MEDIUMTEXT,
     131            "SEPARATOR": TokenType.SEPARATOR,
    -132            "START": TokenType.BEGIN,
    -133            "_ARMSCII8": TokenType.INTRODUCER,
    -134            "_ASCII": TokenType.INTRODUCER,
    -135            "_BIG5": TokenType.INTRODUCER,
    -136            "_BINARY": TokenType.INTRODUCER,
    -137            "_CP1250": TokenType.INTRODUCER,
    -138            "_CP1251": TokenType.INTRODUCER,
    -139            "_CP1256": TokenType.INTRODUCER,
    -140            "_CP1257": TokenType.INTRODUCER,
    -141            "_CP850": TokenType.INTRODUCER,
    -142            "_CP852": TokenType.INTRODUCER,
    -143            "_CP866": TokenType.INTRODUCER,
    -144            "_CP932": TokenType.INTRODUCER,
    -145            "_DEC8": TokenType.INTRODUCER,
    -146            "_EUCJPMS": TokenType.INTRODUCER,
    -147            "_EUCKR": TokenType.INTRODUCER,
    -148            "_GB18030": TokenType.INTRODUCER,
    -149            "_GB2312": TokenType.INTRODUCER,
    -150            "_GBK": TokenType.INTRODUCER,
    -151            "_GEOSTD8": TokenType.INTRODUCER,
    -152            "_GREEK": TokenType.INTRODUCER,
    -153            "_HEBREW": TokenType.INTRODUCER,
    -154            "_HP8": TokenType.INTRODUCER,
    -155            "_KEYBCS2": TokenType.INTRODUCER,
    -156            "_KOI8R": TokenType.INTRODUCER,
    -157            "_KOI8U": TokenType.INTRODUCER,
    -158            "_LATIN1": TokenType.INTRODUCER,
    -159            "_LATIN2": TokenType.INTRODUCER,
    -160            "_LATIN5": TokenType.INTRODUCER,
    -161            "_LATIN7": TokenType.INTRODUCER,
    -162            "_MACCE": TokenType.INTRODUCER,
    -163            "_MACROMAN": TokenType.INTRODUCER,
    -164            "_SJIS": TokenType.INTRODUCER,
    -165            "_SWE7": TokenType.INTRODUCER,
    -166            "_TIS620": TokenType.INTRODUCER,
    -167            "_UCS2": TokenType.INTRODUCER,
    -168            "_UJIS": TokenType.INTRODUCER,
    -169            # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
    -170            "_UTF8": TokenType.INTRODUCER,
    -171            "_UTF16": TokenType.INTRODUCER,
    -172            "_UTF16LE": TokenType.INTRODUCER,
    -173            "_UTF32": TokenType.INTRODUCER,
    -174            "_UTF8MB3": TokenType.INTRODUCER,
    -175            "_UTF8MB4": TokenType.INTRODUCER,
    -176            "@@": TokenType.SESSION_PARAMETER,
    -177        }
    -178
    -179        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
    -180
    -181    class Parser(parser.Parser):
    -182        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS, TokenType.SCHEMA, TokenType.DATABASE}
    -183
    -184        FUNCTIONS = {
    -185            **parser.Parser.FUNCTIONS,
    -186            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
    -187            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "mysql"),
    -188            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
    -189            "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),
    -190            "LOCATE": locate_to_strposition,
    -191            "STR_TO_DATE": _str_to_date,
    -192        }
    -193
    -194        FUNCTION_PARSERS = {
    -195            **parser.Parser.FUNCTION_PARSERS,
    -196            "GROUP_CONCAT": lambda self: self.expression(
    -197                exp.GroupConcat,
    -198                this=self._parse_lambda(),
    -199                separator=self._match(TokenType.SEPARATOR) and self._parse_field(),
    -200            ),
    -201        }
    -202
    -203        STATEMENT_PARSERS = {
    -204            **parser.Parser.STATEMENT_PARSERS,
    -205            TokenType.SHOW: lambda self: self._parse_show(),
    -206        }
    -207
    -208        SHOW_PARSERS = {
    -209            "BINARY LOGS": _show_parser("BINARY LOGS"),
    -210            "MASTER LOGS": _show_parser("BINARY LOGS"),
    -211            "BINLOG EVENTS": _show_parser("BINLOG EVENTS"),
    -212            "CHARACTER SET": _show_parser("CHARACTER SET"),
    -213            "CHARSET": _show_parser("CHARACTER SET"),
    -214            "COLLATION": _show_parser("COLLATION"),
    -215            "FULL COLUMNS": _show_parser("COLUMNS", target="FROM", full=True),
    -216            "COLUMNS": _show_parser("COLUMNS", target="FROM"),
    -217            "CREATE DATABASE": _show_parser("CREATE DATABASE", target=True),
    -218            "CREATE EVENT": _show_parser("CREATE EVENT", target=True),
    -219            "CREATE FUNCTION": _show_parser("CREATE FUNCTION", target=True),
    -220            "CREATE PROCEDURE": _show_parser("CREATE PROCEDURE", target=True),
    -221            "CREATE TABLE": _show_parser("CREATE TABLE", target=True),
    -222            "CREATE TRIGGER": _show_parser("CREATE TRIGGER", target=True),
    -223            "CREATE VIEW": _show_parser("CREATE VIEW", target=True),
    -224            "DATABASES": _show_parser("DATABASES"),
    -225            "ENGINE": _show_parser("ENGINE", target=True),
    -226            "STORAGE ENGINES": _show_parser("ENGINES"),
    -227            "ENGINES": _show_parser("ENGINES"),
    -228            "ERRORS": _show_parser("ERRORS"),
    -229            "EVENTS": _show_parser("EVENTS"),
    -230            "FUNCTION CODE": _show_parser("FUNCTION CODE", target=True),
    -231            "FUNCTION STATUS": _show_parser("FUNCTION STATUS"),
    -232            "GRANTS": _show_parser("GRANTS", target="FOR"),
    -233            "INDEX": _show_parser("INDEX", target="FROM"),
    -234            "MASTER STATUS": _show_parser("MASTER STATUS"),
    -235            "OPEN TABLES": _show_parser("OPEN TABLES"),
    -236            "PLUGINS": _show_parser("PLUGINS"),
    -237            "PROCEDURE CODE": _show_parser("PROCEDURE CODE", target=True),
    -238            "PROCEDURE STATUS": _show_parser("PROCEDURE STATUS"),
    -239            "PRIVILEGES": _show_parser("PRIVILEGES"),
    -240            "FULL PROCESSLIST": _show_parser("PROCESSLIST", full=True),
    -241            "PROCESSLIST": _show_parser("PROCESSLIST"),
    -242            "PROFILE": _show_parser("PROFILE"),
    -243            "PROFILES": _show_parser("PROFILES"),
    -244            "RELAYLOG EVENTS": _show_parser("RELAYLOG EVENTS"),
    -245            "REPLICAS": _show_parser("REPLICAS"),
    -246            "SLAVE HOSTS": _show_parser("REPLICAS"),
    -247            "REPLICA STATUS": _show_parser("REPLICA STATUS"),
    -248            "SLAVE STATUS": _show_parser("REPLICA STATUS"),
    -249            "GLOBAL STATUS": _show_parser("STATUS", global_=True),
    -250            "SESSION STATUS": _show_parser("STATUS"),
    -251            "STATUS": _show_parser("STATUS"),
    -252            "TABLE STATUS": _show_parser("TABLE STATUS"),
    -253            "FULL TABLES": _show_parser("TABLES", full=True),
    -254            "TABLES": _show_parser("TABLES"),
    -255            "TRIGGERS": _show_parser("TRIGGERS"),
    -256            "GLOBAL VARIABLES": _show_parser("VARIABLES", global_=True),
    -257            "SESSION VARIABLES": _show_parser("VARIABLES"),
    -258            "VARIABLES": _show_parser("VARIABLES"),
    -259            "WARNINGS": _show_parser("WARNINGS"),
    -260        }
    -261
    -262        SET_PARSERS = {
    -263            **parser.Parser.SET_PARSERS,
    -264            "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"),
    -265            "PERSIST_ONLY": lambda self: self._parse_set_item_assignment("PERSIST_ONLY"),
    -266            "CHARACTER SET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
    -267            "CHARSET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
    -268            "NAMES": lambda self: self._parse_set_item_names(),
    -269        }
    -270
    -271        PROFILE_TYPES = {
    -272            "ALL",
    -273            "BLOCK IO",
    -274            "CONTEXT SWITCHES",
    -275            "CPU",
    -276            "IPC",
    -277            "MEMORY",
    -278            "PAGE FAULTS",
    -279            "SOURCE",
    -280            "SWAPS",
    -281        }
    -282
    -283        LOG_DEFAULTS_TO_LN = True
    -284
    -285        def _parse_show_mysql(
    -286            self,
    -287            this: str,
    -288            target: bool | str = False,
    -289            full: t.Optional[bool] = None,
    -290            global_: t.Optional[bool] = None,
    -291        ) -> exp.Show:
    -292            if target:
    -293                if isinstance(target, str):
    -294                    self._match_text_seq(target)
    -295                target_id = self._parse_id_var()
    -296            else:
    -297                target_id = None
    -298
    -299            log = self._parse_string() if self._match_text_seq("IN") else None
    -300
    -301            if this in {"BINLOG EVENTS", "RELAYLOG EVENTS"}:
    -302                position = self._parse_number() if self._match_text_seq("FROM") else None
    -303                db = None
    -304            else:
    -305                position = None
    -306                db = None
    -307
    -308                if self._match(TokenType.FROM):
    -309                    db = self._parse_id_var()
    -310                elif self._match(TokenType.DOT):
    -311                    db = target_id
    -312                    target_id = self._parse_id_var()
    -313
    -314            channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None
    -315
    -316            like = self._parse_string() if self._match_text_seq("LIKE") else None
    -317            where = self._parse_where()
    +132            "ENUM": TokenType.ENUM,
    +133            "START": TokenType.BEGIN,
    +134            "_ARMSCII8": TokenType.INTRODUCER,
    +135            "_ASCII": TokenType.INTRODUCER,
    +136            "_BIG5": TokenType.INTRODUCER,
    +137            "_BINARY": TokenType.INTRODUCER,
    +138            "_CP1250": TokenType.INTRODUCER,
    +139            "_CP1251": TokenType.INTRODUCER,
    +140            "_CP1256": TokenType.INTRODUCER,
    +141            "_CP1257": TokenType.INTRODUCER,
    +142            "_CP850": TokenType.INTRODUCER,
    +143            "_CP852": TokenType.INTRODUCER,
    +144            "_CP866": TokenType.INTRODUCER,
    +145            "_CP932": TokenType.INTRODUCER,
    +146            "_DEC8": TokenType.INTRODUCER,
    +147            "_EUCJPMS": TokenType.INTRODUCER,
    +148            "_EUCKR": TokenType.INTRODUCER,
    +149            "_GB18030": TokenType.INTRODUCER,
    +150            "_GB2312": TokenType.INTRODUCER,
    +151            "_GBK": TokenType.INTRODUCER,
    +152            "_GEOSTD8": TokenType.INTRODUCER,
    +153            "_GREEK": TokenType.INTRODUCER,
    +154            "_HEBREW": TokenType.INTRODUCER,
    +155            "_HP8": TokenType.INTRODUCER,
    +156            "_KEYBCS2": TokenType.INTRODUCER,
    +157            "_KOI8R": TokenType.INTRODUCER,
    +158            "_KOI8U": TokenType.INTRODUCER,
    +159            "_LATIN1": TokenType.INTRODUCER,
    +160            "_LATIN2": TokenType.INTRODUCER,
    +161            "_LATIN5": TokenType.INTRODUCER,
    +162            "_LATIN7": TokenType.INTRODUCER,
    +163            "_MACCE": TokenType.INTRODUCER,
    +164            "_MACROMAN": TokenType.INTRODUCER,
    +165            "_SJIS": TokenType.INTRODUCER,
    +166            "_SWE7": TokenType.INTRODUCER,
    +167            "_TIS620": TokenType.INTRODUCER,
    +168            "_UCS2": TokenType.INTRODUCER,
    +169            "_UJIS": TokenType.INTRODUCER,
    +170            # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
    +171            "_UTF8": TokenType.INTRODUCER,
    +172            "_UTF16": TokenType.INTRODUCER,
    +173            "_UTF16LE": TokenType.INTRODUCER,
    +174            "_UTF32": TokenType.INTRODUCER,
    +175            "_UTF8MB3": TokenType.INTRODUCER,
    +176            "_UTF8MB4": TokenType.INTRODUCER,
    +177            "@@": TokenType.SESSION_PARAMETER,
    +178        }
    +179
    +180        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
    +181
    +182    class Parser(parser.Parser):
    +183        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS, TokenType.SCHEMA, TokenType.DATABASE}
    +184
    +185        FUNCTIONS = {
    +186            **parser.Parser.FUNCTIONS,
    +187            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
    +188            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "mysql"),
    +189            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
    +190            "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),
    +191            "LOCATE": locate_to_strposition,
    +192            "STR_TO_DATE": _str_to_date,
    +193        }
    +194
    +195        FUNCTION_PARSERS = {
    +196            **parser.Parser.FUNCTION_PARSERS,
    +197            "GROUP_CONCAT": lambda self: self.expression(
    +198                exp.GroupConcat,
    +199                this=self._parse_lambda(),
    +200                separator=self._match(TokenType.SEPARATOR) and self._parse_field(),
    +201            ),
    +202        }
    +203
    +204        STATEMENT_PARSERS = {
    +205            **parser.Parser.STATEMENT_PARSERS,
    +206            TokenType.SHOW: lambda self: self._parse_show(),
    +207        }
    +208
    +209        SHOW_PARSERS = {
    +210            "BINARY LOGS": _show_parser("BINARY LOGS"),
    +211            "MASTER LOGS": _show_parser("BINARY LOGS"),
    +212            "BINLOG EVENTS": _show_parser("BINLOG EVENTS"),
    +213            "CHARACTER SET": _show_parser("CHARACTER SET"),
    +214            "CHARSET": _show_parser("CHARACTER SET"),
    +215            "COLLATION": _show_parser("COLLATION"),
    +216            "FULL COLUMNS": _show_parser("COLUMNS", target="FROM", full=True),
    +217            "COLUMNS": _show_parser("COLUMNS", target="FROM"),
    +218            "CREATE DATABASE": _show_parser("CREATE DATABASE", target=True),
    +219            "CREATE EVENT": _show_parser("CREATE EVENT", target=True),
    +220            "CREATE FUNCTION": _show_parser("CREATE FUNCTION", target=True),
    +221            "CREATE PROCEDURE": _show_parser("CREATE PROCEDURE", target=True),
    +222            "CREATE TABLE": _show_parser("CREATE TABLE", target=True),
    +223            "CREATE TRIGGER": _show_parser("CREATE TRIGGER", target=True),
    +224            "CREATE VIEW": _show_parser("CREATE VIEW", target=True),
    +225            "DATABASES": _show_parser("DATABASES"),
    +226            "ENGINE": _show_parser("ENGINE", target=True),
    +227            "STORAGE ENGINES": _show_parser("ENGINES"),
    +228            "ENGINES": _show_parser("ENGINES"),
    +229            "ERRORS": _show_parser("ERRORS"),
    +230            "EVENTS": _show_parser("EVENTS"),
    +231            "FUNCTION CODE": _show_parser("FUNCTION CODE", target=True),
    +232            "FUNCTION STATUS": _show_parser("FUNCTION STATUS"),
    +233            "GRANTS": _show_parser("GRANTS", target="FOR"),
    +234            "INDEX": _show_parser("INDEX", target="FROM"),
    +235            "MASTER STATUS": _show_parser("MASTER STATUS"),
    +236            "OPEN TABLES": _show_parser("OPEN TABLES"),
    +237            "PLUGINS": _show_parser("PLUGINS"),
    +238            "PROCEDURE CODE": _show_parser("PROCEDURE CODE", target=True),
    +239            "PROCEDURE STATUS": _show_parser("PROCEDURE STATUS"),
    +240            "PRIVILEGES": _show_parser("PRIVILEGES"),
    +241            "FULL PROCESSLIST": _show_parser("PROCESSLIST", full=True),
    +242            "PROCESSLIST": _show_parser("PROCESSLIST"),
    +243            "PROFILE": _show_parser("PROFILE"),
    +244            "PROFILES": _show_parser("PROFILES"),
    +245            "RELAYLOG EVENTS": _show_parser("RELAYLOG EVENTS"),
    +246            "REPLICAS": _show_parser("REPLICAS"),
    +247            "SLAVE HOSTS": _show_parser("REPLICAS"),
    +248            "REPLICA STATUS": _show_parser("REPLICA STATUS"),
    +249            "SLAVE STATUS": _show_parser("REPLICA STATUS"),
    +250            "GLOBAL STATUS": _show_parser("STATUS", global_=True),
    +251            "SESSION STATUS": _show_parser("STATUS"),
    +252            "STATUS": _show_parser("STATUS"),
    +253            "TABLE STATUS": _show_parser("TABLE STATUS"),
    +254            "FULL TABLES": _show_parser("TABLES", full=True),
    +255            "TABLES": _show_parser("TABLES"),
    +256            "TRIGGERS": _show_parser("TRIGGERS"),
    +257            "GLOBAL VARIABLES": _show_parser("VARIABLES", global_=True),
    +258            "SESSION VARIABLES": _show_parser("VARIABLES"),
    +259            "VARIABLES": _show_parser("VARIABLES"),
    +260            "WARNINGS": _show_parser("WARNINGS"),
    +261        }
    +262
    +263        SET_PARSERS = {
    +264            **parser.Parser.SET_PARSERS,
    +265            "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"),
    +266            "PERSIST_ONLY": lambda self: self._parse_set_item_assignment("PERSIST_ONLY"),
    +267            "CHARACTER SET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
    +268            "CHARSET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
    +269            "NAMES": lambda self: self._parse_set_item_names(),
    +270        }
    +271
    +272        PROFILE_TYPES = {
    +273            "ALL",
    +274            "BLOCK IO",
    +275            "CONTEXT SWITCHES",
    +276            "CPU",
    +277            "IPC",
    +278            "MEMORY",
    +279            "PAGE FAULTS",
    +280            "SOURCE",
    +281            "SWAPS",
    +282        }
    +283
    +284        TYPE_TOKENS = {
    +285            *parser.Parser.TYPE_TOKENS,
    +286            TokenType.SET,
    +287        }
    +288
    +289        ENUM_TYPE_TOKENS = {
    +290            *parser.Parser.ENUM_TYPE_TOKENS,
    +291            TokenType.SET,
    +292        }
    +293
    +294        LOG_DEFAULTS_TO_LN = True
    +295
    +296        def _parse_show_mysql(
    +297            self,
    +298            this: str,
    +299            target: bool | str = False,
    +300            full: t.Optional[bool] = None,
    +301            global_: t.Optional[bool] = None,
    +302        ) -> exp.Show:
    +303            if target:
    +304                if isinstance(target, str):
    +305                    self._match_text_seq(target)
    +306                target_id = self._parse_id_var()
    +307            else:
    +308                target_id = None
    +309
    +310            log = self._parse_string() if self._match_text_seq("IN") else None
    +311
    +312            if this in {"BINLOG EVENTS", "RELAYLOG EVENTS"}:
    +313                position = self._parse_number() if self._match_text_seq("FROM") else None
    +314                db = None
    +315            else:
    +316                position = None
    +317                db = None
     318
    -319            if this == "PROFILE":
    -320                types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES))
    -321                query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None
    -322                offset = self._parse_number() if self._match_text_seq("OFFSET") else None
    -323                limit = self._parse_number() if self._match_text_seq("LIMIT") else None
    -324            else:
    -325                types, query = None, None
    -326                offset, limit = self._parse_oldstyle_limit()
    -327
    -328            mutex = True if self._match_text_seq("MUTEX") else None
    -329            mutex = False if self._match_text_seq("STATUS") else mutex
    -330
    -331            return self.expression(
    -332                exp.Show,
    -333                this=this,
    -334                target=target_id,
    -335                full=full,
    -336                log=log,
    -337                position=position,
    -338                db=db,
    -339                channel=channel,
    -340                like=like,
    -341                where=where,
    -342                types=types,
    -343                query=query,
    -344                offset=offset,
    -345                limit=limit,
    -346                mutex=mutex,
    -347                **{"global": global_},  # type: ignore
    -348            )
    -349
    -350        def _parse_oldstyle_limit(
    -351            self,
    -352        ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]:
    -353            limit = None
    -354            offset = None
    -355            if self._match_text_seq("LIMIT"):
    -356                parts = self._parse_csv(self._parse_number)
    -357                if len(parts) == 1:
    -358                    limit = parts[0]
    -359                elif len(parts) == 2:
    -360                    limit = parts[1]
    -361                    offset = parts[0]
    -362
    -363            return offset, limit
    -364
    -365        def _parse_set_item_charset(self, kind: str) -> exp.Expression:
    -366            this = self._parse_string() or self._parse_id_var()
    -367            return self.expression(exp.SetItem, this=this, kind=kind)
    -368
    -369        def _parse_set_item_names(self) -> exp.Expression:
    -370            charset = self._parse_string() or self._parse_id_var()
    -371            if self._match_text_seq("COLLATE"):
    -372                collate = self._parse_string() or self._parse_id_var()
    -373            else:
    -374                collate = None
    +319                if self._match(TokenType.FROM):
    +320                    db = self._parse_id_var()
    +321                elif self._match(TokenType.DOT):
    +322                    db = target_id
    +323                    target_id = self._parse_id_var()
    +324
    +325            channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None
    +326
    +327            like = self._parse_string() if self._match_text_seq("LIKE") else None
    +328            where = self._parse_where()
    +329
    +330            if this == "PROFILE":
    +331                types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES))
    +332                query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None
    +333                offset = self._parse_number() if self._match_text_seq("OFFSET") else None
    +334                limit = self._parse_number() if self._match_text_seq("LIMIT") else None
    +335            else:
    +336                types, query = None, None
    +337                offset, limit = self._parse_oldstyle_limit()
    +338
    +339            mutex = True if self._match_text_seq("MUTEX") else None
    +340            mutex = False if self._match_text_seq("STATUS") else mutex
    +341
    +342            return self.expression(
    +343                exp.Show,
    +344                this=this,
    +345                target=target_id,
    +346                full=full,
    +347                log=log,
    +348                position=position,
    +349                db=db,
    +350                channel=channel,
    +351                like=like,
    +352                where=where,
    +353                types=types,
    +354                query=query,
    +355                offset=offset,
    +356                limit=limit,
    +357                mutex=mutex,
    +358                **{"global": global_},  # type: ignore
    +359            )
    +360
    +361        def _parse_oldstyle_limit(
    +362            self,
    +363        ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]:
    +364            limit = None
    +365            offset = None
    +366            if self._match_text_seq("LIMIT"):
    +367                parts = self._parse_csv(self._parse_number)
    +368                if len(parts) == 1:
    +369                    limit = parts[0]
    +370                elif len(parts) == 2:
    +371                    limit = parts[1]
    +372                    offset = parts[0]
    +373
    +374            return offset, limit
     375
    -376            return self.expression(
    -377                exp.SetItem,
    -378                this=charset,
    -379                collate=collate,
    -380                kind="NAMES",
    -381            )
    -382
    -383    class Generator(generator.Generator):
    -384        LOCKING_READS_SUPPORTED = True
    -385        NULL_ORDERING_SUPPORTED = False
    -386        JOIN_HINTS = False
    -387        TABLE_HINTS = False
    +376        def _parse_set_item_charset(self, kind: str) -> exp.Expression:
    +377            this = self._parse_string() or self._parse_id_var()
    +378            return self.expression(exp.SetItem, this=this, kind=kind)
    +379
    +380        def _parse_set_item_names(self) -> exp.Expression:
    +381            charset = self._parse_string() or self._parse_id_var()
    +382            if self._match_text_seq("COLLATE"):
    +383                collate = self._parse_string() or self._parse_id_var()
    +384            else:
    +385                collate = None
    +386
    +387            return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES")
     388
    -389        TRANSFORMS = {
    -390            **generator.Generator.TRANSFORMS,
    -391            exp.CurrentDate: no_paren_current_date_sql,
    -392            exp.DateDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression),
    -393            exp.DateAdd: _date_add_sql("ADD"),
    -394            exp.DateStrToDate: datestrtodate_sql,
    -395            exp.DateSub: _date_add_sql("SUB"),
    -396            exp.DateTrunc: _date_trunc_sql,
    -397            exp.DayOfMonth: rename_func("DAYOFMONTH"),
    -398            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    -399            exp.DayOfYear: rename_func("DAYOFYEAR"),
    -400            exp.GroupConcat: lambda self, e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""",
    -401            exp.ILike: no_ilike_sql,
    -402            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    -403            exp.Max: max_or_greatest,
    -404            exp.Min: min_or_least,
    -405            exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
    -406            exp.NullSafeNEQ: lambda self, e: self.not_sql(self.binary(e, "<=>")),
    -407            exp.Pivot: no_pivot_sql,
    -408            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -409            exp.StrPosition: strposition_to_locate_sql,
    -410            exp.StrToDate: _str_to_date_sql,
    -411            exp.StrToTime: _str_to_date_sql,
    -412            exp.TableSample: no_tablesample_sql,
    -413            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    -414            exp.TimeToStr: lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)),
    -415            exp.Trim: _trim_sql,
    -416            exp.TryCast: no_trycast_sql,
    -417            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
    -418        }
    -419
    -420        TYPE_MAPPING = generator.Generator.TYPE_MAPPING.copy()
    -421        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)
    -422        TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)
    -423        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)
    -424        TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)
    +389    class Generator(generator.Generator):
    +390        LOCKING_READS_SUPPORTED = True
    +391        NULL_ORDERING_SUPPORTED = False
    +392        JOIN_HINTS = False
    +393        TABLE_HINTS = False
    +394
    +395        TRANSFORMS = {
    +396            **generator.Generator.TRANSFORMS,
    +397            exp.CurrentDate: no_paren_current_date_sql,
    +398            exp.DateDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression),
    +399            exp.DateAdd: _date_add_sql("ADD"),
    +400            exp.DateStrToDate: datestrtodate_sql,
    +401            exp.DateSub: _date_add_sql("SUB"),
    +402            exp.DateTrunc: _date_trunc_sql,
    +403            exp.DayOfMonth: rename_func("DAYOFMONTH"),
    +404            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    +405            exp.DayOfYear: rename_func("DAYOFYEAR"),
    +406            exp.GroupConcat: lambda self, e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""",
    +407            exp.ILike: no_ilike_sql,
    +408            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    +409            exp.Max: max_or_greatest,
    +410            exp.Min: min_or_least,
    +411            exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
    +412            exp.NullSafeNEQ: lambda self, e: self.not_sql(self.binary(e, "<=>")),
    +413            exp.Pivot: no_pivot_sql,
    +414            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +415            exp.StrPosition: strposition_to_locate_sql,
    +416            exp.StrToDate: _str_to_date_sql,
    +417            exp.StrToTime: _str_to_date_sql,
    +418            exp.TableSample: no_tablesample_sql,
    +419            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    +420            exp.TimeToStr: lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)),
    +421            exp.Trim: _trim_sql,
    +422            exp.TryCast: no_trycast_sql,
    +423            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
    +424        }
     425
    -426        PROPERTIES_LOCATION = {
    -427            **generator.Generator.PROPERTIES_LOCATION,
    -428            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    -429            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -430        }
    +426        TYPE_MAPPING = generator.Generator.TYPE_MAPPING.copy()
    +427        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)
    +428        TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)
    +429        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)
    +430        TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)
     431
    -432        LIMIT_FETCH = "LIMIT"
    -433
    -434        def show_sql(self, expression: exp.Show) -> str:
    -435            this = f" {expression.name}"
    -436            full = " FULL" if expression.args.get("full") else ""
    -437            global_ = " GLOBAL" if expression.args.get("global") else ""
    -438
    -439            target = self.sql(expression, "target")
    -440            target = f" {target}" if target else ""
    -441            if expression.name in {"COLUMNS", "INDEX"}:
    -442                target = f" FROM{target}"
    -443            elif expression.name == "GRANTS":
    -444                target = f" FOR{target}"
    -445
    -446            db = self._prefixed_sql("FROM", expression, "db")
    -447
    -448            like = self._prefixed_sql("LIKE", expression, "like")
    -449            where = self.sql(expression, "where")
    -450
    -451            types = self.expressions(expression, key="types")
    -452            types = f" {types}" if types else types
    -453            query = self._prefixed_sql("FOR QUERY", expression, "query")
    -454
    -455            if expression.name == "PROFILE":
    -456                offset = self._prefixed_sql("OFFSET", expression, "offset")
    -457                limit = self._prefixed_sql("LIMIT", expression, "limit")
    -458            else:
    -459                offset = ""
    -460                limit = self._oldstyle_limit_sql(expression)
    -461
    -462            log = self._prefixed_sql("IN", expression, "log")
    -463            position = self._prefixed_sql("FROM", expression, "position")
    -464
    -465            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
    -466
    -467            if expression.name == "ENGINE":
    -468                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
    -469            else:
    -470                mutex_or_status = ""
    -471
    -472            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
    -473
    -474        def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str:
    -475            sql = self.sql(expression, arg)
    -476            if not sql:
    -477                return ""
    -478            return f" {prefix} {sql}"
    +432        PROPERTIES_LOCATION = {
    +433            **generator.Generator.PROPERTIES_LOCATION,
    +434            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    +435            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +436        }
    +437
    +438        LIMIT_FETCH = "LIMIT"
    +439
    +440        def show_sql(self, expression: exp.Show) -> str:
    +441            this = f" {expression.name}"
    +442            full = " FULL" if expression.args.get("full") else ""
    +443            global_ = " GLOBAL" if expression.args.get("global") else ""
    +444
    +445            target = self.sql(expression, "target")
    +446            target = f" {target}" if target else ""
    +447            if expression.name in {"COLUMNS", "INDEX"}:
    +448                target = f" FROM{target}"
    +449            elif expression.name == "GRANTS":
    +450                target = f" FOR{target}"
    +451
    +452            db = self._prefixed_sql("FROM", expression, "db")
    +453
    +454            like = self._prefixed_sql("LIKE", expression, "like")
    +455            where = self.sql(expression, "where")
    +456
    +457            types = self.expressions(expression, key="types")
    +458            types = f" {types}" if types else types
    +459            query = self._prefixed_sql("FOR QUERY", expression, "query")
    +460
    +461            if expression.name == "PROFILE":
    +462                offset = self._prefixed_sql("OFFSET", expression, "offset")
    +463                limit = self._prefixed_sql("LIMIT", expression, "limit")
    +464            else:
    +465                offset = ""
    +466                limit = self._oldstyle_limit_sql(expression)
    +467
    +468            log = self._prefixed_sql("IN", expression, "log")
    +469            position = self._prefixed_sql("FROM", expression, "position")
    +470
    +471            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
    +472
    +473            if expression.name == "ENGINE":
    +474                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
    +475            else:
    +476                mutex_or_status = ""
    +477
    +478            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
     479
    -480        def _oldstyle_limit_sql(self, expression: exp.Show) -> str:
    -481            limit = self.sql(expression, "limit")
    -482            offset = self.sql(expression, "offset")
    -483            if limit:
    -484                limit_offset = f"{offset}, {limit}" if offset else limit
    -485                return f" LIMIT {limit_offset}"
    -486            return ""
    +480        def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str:
    +481            sql = self.sql(expression, arg)
    +482            return f" {prefix} {sql}" if sql else ""
    +483
    +484        def _oldstyle_limit_sql(self, expression: exp.Show) -> str:
    +485            limit = self.sql(expression, "limit")
    +486            offset = self.sql(expression, "offset")
    +487            if limit:
    +488                limit_offset = f"{offset}, {limit}" if offset else limit
    +489                return f" LIMIT {limit_offset}"
    +490            return ""
     
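A minimal usage sketch of the SHOW machinery above, assuming only the public sqlglot entry points (parse_one and Expression.sql) and the "mysql" dialect name; the statement text itself is illustrative:

    import sqlglot

    # _parse_show_mysql builds an exp.Show node; the old-style "LIMIT offset, count"
    # form is handled by _parse_oldstyle_limit and re-emitted by _oldstyle_limit_sql.
    ast = sqlglot.parse_one(
        "SHOW BINLOG EVENTS IN 'mysql-bin.000001' FROM 4 LIMIT 2, 10",
        read="mysql",
    )
    print(ast.sql(dialect="mysql"))  # round-trips through show_sql above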
    @@ -1020,54 +1028,55 @@
     129            "MEDIUMBLOB": TokenType.MEDIUMBLOB,
     130            "MEDIUMTEXT": TokenType.MEDIUMTEXT,
     131            "SEPARATOR": TokenType.SEPARATOR,
    -132            "START": TokenType.BEGIN,
    -133            "_ARMSCII8": TokenType.INTRODUCER,
    -134            "_ASCII": TokenType.INTRODUCER,
    -135            "_BIG5": TokenType.INTRODUCER,
    -136            "_BINARY": TokenType.INTRODUCER,
    -137            "_CP1250": TokenType.INTRODUCER,
    -138            "_CP1251": TokenType.INTRODUCER,
    -139            "_CP1256": TokenType.INTRODUCER,
    -140            "_CP1257": TokenType.INTRODUCER,
    -141            "_CP850": TokenType.INTRODUCER,
    -142            "_CP852": TokenType.INTRODUCER,
    -143            "_CP866": TokenType.INTRODUCER,
    -144            "_CP932": TokenType.INTRODUCER,
    -145            "_DEC8": TokenType.INTRODUCER,
    -146            "_EUCJPMS": TokenType.INTRODUCER,
    -147            "_EUCKR": TokenType.INTRODUCER,
    -148            "_GB18030": TokenType.INTRODUCER,
    -149            "_GB2312": TokenType.INTRODUCER,
    -150            "_GBK": TokenType.INTRODUCER,
    -151            "_GEOSTD8": TokenType.INTRODUCER,
    -152            "_GREEK": TokenType.INTRODUCER,
    -153            "_HEBREW": TokenType.INTRODUCER,
    -154            "_HP8": TokenType.INTRODUCER,
    -155            "_KEYBCS2": TokenType.INTRODUCER,
    -156            "_KOI8R": TokenType.INTRODUCER,
    -157            "_KOI8U": TokenType.INTRODUCER,
    -158            "_LATIN1": TokenType.INTRODUCER,
    -159            "_LATIN2": TokenType.INTRODUCER,
    -160            "_LATIN5": TokenType.INTRODUCER,
    -161            "_LATIN7": TokenType.INTRODUCER,
    -162            "_MACCE": TokenType.INTRODUCER,
    -163            "_MACROMAN": TokenType.INTRODUCER,
    -164            "_SJIS": TokenType.INTRODUCER,
    -165            "_SWE7": TokenType.INTRODUCER,
    -166            "_TIS620": TokenType.INTRODUCER,
    -167            "_UCS2": TokenType.INTRODUCER,
    -168            "_UJIS": TokenType.INTRODUCER,
    -169            # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
    -170            "_UTF8": TokenType.INTRODUCER,
    -171            "_UTF16": TokenType.INTRODUCER,
    -172            "_UTF16LE": TokenType.INTRODUCER,
    -173            "_UTF32": TokenType.INTRODUCER,
    -174            "_UTF8MB3": TokenType.INTRODUCER,
    -175            "_UTF8MB4": TokenType.INTRODUCER,
    -176            "@@": TokenType.SESSION_PARAMETER,
    -177        }
    -178
    -179        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
    +132            "ENUM": TokenType.ENUM,
    +133            "START": TokenType.BEGIN,
    +134            "_ARMSCII8": TokenType.INTRODUCER,
    +135            "_ASCII": TokenType.INTRODUCER,
    +136            "_BIG5": TokenType.INTRODUCER,
    +137            "_BINARY": TokenType.INTRODUCER,
    +138            "_CP1250": TokenType.INTRODUCER,
    +139            "_CP1251": TokenType.INTRODUCER,
    +140            "_CP1256": TokenType.INTRODUCER,
    +141            "_CP1257": TokenType.INTRODUCER,
    +142            "_CP850": TokenType.INTRODUCER,
    +143            "_CP852": TokenType.INTRODUCER,
    +144            "_CP866": TokenType.INTRODUCER,
    +145            "_CP932": TokenType.INTRODUCER,
    +146            "_DEC8": TokenType.INTRODUCER,
    +147            "_EUCJPMS": TokenType.INTRODUCER,
    +148            "_EUCKR": TokenType.INTRODUCER,
    +149            "_GB18030": TokenType.INTRODUCER,
    +150            "_GB2312": TokenType.INTRODUCER,
    +151            "_GBK": TokenType.INTRODUCER,
    +152            "_GEOSTD8": TokenType.INTRODUCER,
    +153            "_GREEK": TokenType.INTRODUCER,
    +154            "_HEBREW": TokenType.INTRODUCER,
    +155            "_HP8": TokenType.INTRODUCER,
    +156            "_KEYBCS2": TokenType.INTRODUCER,
    +157            "_KOI8R": TokenType.INTRODUCER,
    +158            "_KOI8U": TokenType.INTRODUCER,
    +159            "_LATIN1": TokenType.INTRODUCER,
    +160            "_LATIN2": TokenType.INTRODUCER,
    +161            "_LATIN5": TokenType.INTRODUCER,
    +162            "_LATIN7": TokenType.INTRODUCER,
    +163            "_MACCE": TokenType.INTRODUCER,
    +164            "_MACROMAN": TokenType.INTRODUCER,
    +165            "_SJIS": TokenType.INTRODUCER,
    +166            "_SWE7": TokenType.INTRODUCER,
    +167            "_TIS620": TokenType.INTRODUCER,
    +168            "_UCS2": TokenType.INTRODUCER,
    +169            "_UJIS": TokenType.INTRODUCER,
    +170            # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
    +171            "_UTF8": TokenType.INTRODUCER,
    +172            "_UTF16": TokenType.INTRODUCER,
    +173            "_UTF16LE": TokenType.INTRODUCER,
    +174            "_UTF32": TokenType.INTRODUCER,
    +175            "_UTF8MB3": TokenType.INTRODUCER,
    +176            "_UTF8MB4": TokenType.INTRODUCER,
    +177            "@@": TokenType.SESSION_PARAMETER,
    +178        }
    +179
    +180        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
    @@ -1079,6 +1088,7 @@
    @@ -1095,231 +1105,228 @@
    -
    181    class Parser(parser.Parser):
    -182        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS, TokenType.SCHEMA, TokenType.DATABASE}
    -183
    -184        FUNCTIONS = {
    -185            **parser.Parser.FUNCTIONS,
    -186            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
    -187            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "mysql"),
    -188            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
    -189            "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),
    -190            "LOCATE": locate_to_strposition,
    -191            "STR_TO_DATE": _str_to_date,
    -192        }
    -193
    -194        FUNCTION_PARSERS = {
    -195            **parser.Parser.FUNCTION_PARSERS,
    -196            "GROUP_CONCAT": lambda self: self.expression(
    -197                exp.GroupConcat,
    -198                this=self._parse_lambda(),
    -199                separator=self._match(TokenType.SEPARATOR) and self._parse_field(),
    -200            ),
    -201        }
    -202
    -203        STATEMENT_PARSERS = {
    -204            **parser.Parser.STATEMENT_PARSERS,
    -205            TokenType.SHOW: lambda self: self._parse_show(),
    -206        }
    -207
    -208        SHOW_PARSERS = {
    -209            "BINARY LOGS": _show_parser("BINARY LOGS"),
    -210            "MASTER LOGS": _show_parser("BINARY LOGS"),
    -211            "BINLOG EVENTS": _show_parser("BINLOG EVENTS"),
    -212            "CHARACTER SET": _show_parser("CHARACTER SET"),
    -213            "CHARSET": _show_parser("CHARACTER SET"),
    -214            "COLLATION": _show_parser("COLLATION"),
    -215            "FULL COLUMNS": _show_parser("COLUMNS", target="FROM", full=True),
    -216            "COLUMNS": _show_parser("COLUMNS", target="FROM"),
    -217            "CREATE DATABASE": _show_parser("CREATE DATABASE", target=True),
    -218            "CREATE EVENT": _show_parser("CREATE EVENT", target=True),
    -219            "CREATE FUNCTION": _show_parser("CREATE FUNCTION", target=True),
    -220            "CREATE PROCEDURE": _show_parser("CREATE PROCEDURE", target=True),
    -221            "CREATE TABLE": _show_parser("CREATE TABLE", target=True),
    -222            "CREATE TRIGGER": _show_parser("CREATE TRIGGER", target=True),
    -223            "CREATE VIEW": _show_parser("CREATE VIEW", target=True),
    -224            "DATABASES": _show_parser("DATABASES"),
    -225            "ENGINE": _show_parser("ENGINE", target=True),
    -226            "STORAGE ENGINES": _show_parser("ENGINES"),
    -227            "ENGINES": _show_parser("ENGINES"),
    -228            "ERRORS": _show_parser("ERRORS"),
    -229            "EVENTS": _show_parser("EVENTS"),
    -230            "FUNCTION CODE": _show_parser("FUNCTION CODE", target=True),
    -231            "FUNCTION STATUS": _show_parser("FUNCTION STATUS"),
    -232            "GRANTS": _show_parser("GRANTS", target="FOR"),
    -233            "INDEX": _show_parser("INDEX", target="FROM"),
    -234            "MASTER STATUS": _show_parser("MASTER STATUS"),
    -235            "OPEN TABLES": _show_parser("OPEN TABLES"),
    -236            "PLUGINS": _show_parser("PLUGINS"),
    -237            "PROCEDURE CODE": _show_parser("PROCEDURE CODE", target=True),
    -238            "PROCEDURE STATUS": _show_parser("PROCEDURE STATUS"),
    -239            "PRIVILEGES": _show_parser("PRIVILEGES"),
    -240            "FULL PROCESSLIST": _show_parser("PROCESSLIST", full=True),
    -241            "PROCESSLIST": _show_parser("PROCESSLIST"),
    -242            "PROFILE": _show_parser("PROFILE"),
    -243            "PROFILES": _show_parser("PROFILES"),
    -244            "RELAYLOG EVENTS": _show_parser("RELAYLOG EVENTS"),
    -245            "REPLICAS": _show_parser("REPLICAS"),
    -246            "SLAVE HOSTS": _show_parser("REPLICAS"),
    -247            "REPLICA STATUS": _show_parser("REPLICA STATUS"),
    -248            "SLAVE STATUS": _show_parser("REPLICA STATUS"),
    -249            "GLOBAL STATUS": _show_parser("STATUS", global_=True),
    -250            "SESSION STATUS": _show_parser("STATUS"),
    -251            "STATUS": _show_parser("STATUS"),
    -252            "TABLE STATUS": _show_parser("TABLE STATUS"),
    -253            "FULL TABLES": _show_parser("TABLES", full=True),
    -254            "TABLES": _show_parser("TABLES"),
    -255            "TRIGGERS": _show_parser("TRIGGERS"),
    -256            "GLOBAL VARIABLES": _show_parser("VARIABLES", global_=True),
    -257            "SESSION VARIABLES": _show_parser("VARIABLES"),
    -258            "VARIABLES": _show_parser("VARIABLES"),
    -259            "WARNINGS": _show_parser("WARNINGS"),
    -260        }
    -261
    -262        SET_PARSERS = {
    -263            **parser.Parser.SET_PARSERS,
    -264            "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"),
    -265            "PERSIST_ONLY": lambda self: self._parse_set_item_assignment("PERSIST_ONLY"),
    -266            "CHARACTER SET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
    -267            "CHARSET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
    -268            "NAMES": lambda self: self._parse_set_item_names(),
    -269        }
    -270
    -271        PROFILE_TYPES = {
    -272            "ALL",
    -273            "BLOCK IO",
    -274            "CONTEXT SWITCHES",
    -275            "CPU",
    -276            "IPC",
    -277            "MEMORY",
    -278            "PAGE FAULTS",
    -279            "SOURCE",
    -280            "SWAPS",
    -281        }
    -282
    -283        LOG_DEFAULTS_TO_LN = True
    -284
    -285        def _parse_show_mysql(
    -286            self,
    -287            this: str,
    -288            target: bool | str = False,
    -289            full: t.Optional[bool] = None,
    -290            global_: t.Optional[bool] = None,
    -291        ) -> exp.Show:
    -292            if target:
    -293                if isinstance(target, str):
    -294                    self._match_text_seq(target)
    -295                target_id = self._parse_id_var()
    -296            else:
    -297                target_id = None
    -298
    -299            log = self._parse_string() if self._match_text_seq("IN") else None
    -300
    -301            if this in {"BINLOG EVENTS", "RELAYLOG EVENTS"}:
    -302                position = self._parse_number() if self._match_text_seq("FROM") else None
    -303                db = None
    -304            else:
    -305                position = None
    -306                db = None
    -307
    -308                if self._match(TokenType.FROM):
    -309                    db = self._parse_id_var()
    -310                elif self._match(TokenType.DOT):
    -311                    db = target_id
    -312                    target_id = self._parse_id_var()
    -313
    -314            channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None
    -315
    -316            like = self._parse_string() if self._match_text_seq("LIKE") else None
    -317            where = self._parse_where()
    +            
    182    class Parser(parser.Parser):
    +183        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS, TokenType.SCHEMA, TokenType.DATABASE}
    +184
    +185        FUNCTIONS = {
    +186            **parser.Parser.FUNCTIONS,
    +187            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
    +188            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "mysql"),
    +189            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
    +190            "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),
    +191            "LOCATE": locate_to_strposition,
    +192            "STR_TO_DATE": _str_to_date,
    +193        }
    +194
    +195        FUNCTION_PARSERS = {
    +196            **parser.Parser.FUNCTION_PARSERS,
    +197            "GROUP_CONCAT": lambda self: self.expression(
    +198                exp.GroupConcat,
    +199                this=self._parse_lambda(),
    +200                separator=self._match(TokenType.SEPARATOR) and self._parse_field(),
    +201            ),
    +202        }
    +203
    +204        STATEMENT_PARSERS = {
    +205            **parser.Parser.STATEMENT_PARSERS,
    +206            TokenType.SHOW: lambda self: self._parse_show(),
    +207        }
    +208
    +209        SHOW_PARSERS = {
    +210            "BINARY LOGS": _show_parser("BINARY LOGS"),
    +211            "MASTER LOGS": _show_parser("BINARY LOGS"),
    +212            "BINLOG EVENTS": _show_parser("BINLOG EVENTS"),
    +213            "CHARACTER SET": _show_parser("CHARACTER SET"),
    +214            "CHARSET": _show_parser("CHARACTER SET"),
    +215            "COLLATION": _show_parser("COLLATION"),
    +216            "FULL COLUMNS": _show_parser("COLUMNS", target="FROM", full=True),
    +217            "COLUMNS": _show_parser("COLUMNS", target="FROM"),
    +218            "CREATE DATABASE": _show_parser("CREATE DATABASE", target=True),
    +219            "CREATE EVENT": _show_parser("CREATE EVENT", target=True),
    +220            "CREATE FUNCTION": _show_parser("CREATE FUNCTION", target=True),
    +221            "CREATE PROCEDURE": _show_parser("CREATE PROCEDURE", target=True),
    +222            "CREATE TABLE": _show_parser("CREATE TABLE", target=True),
    +223            "CREATE TRIGGER": _show_parser("CREATE TRIGGER", target=True),
    +224            "CREATE VIEW": _show_parser("CREATE VIEW", target=True),
    +225            "DATABASES": _show_parser("DATABASES"),
    +226            "ENGINE": _show_parser("ENGINE", target=True),
    +227            "STORAGE ENGINES": _show_parser("ENGINES"),
    +228            "ENGINES": _show_parser("ENGINES"),
    +229            "ERRORS": _show_parser("ERRORS"),
    +230            "EVENTS": _show_parser("EVENTS"),
    +231            "FUNCTION CODE": _show_parser("FUNCTION CODE", target=True),
    +232            "FUNCTION STATUS": _show_parser("FUNCTION STATUS"),
    +233            "GRANTS": _show_parser("GRANTS", target="FOR"),
    +234            "INDEX": _show_parser("INDEX", target="FROM"),
    +235            "MASTER STATUS": _show_parser("MASTER STATUS"),
    +236            "OPEN TABLES": _show_parser("OPEN TABLES"),
    +237            "PLUGINS": _show_parser("PLUGINS"),
    +238            "PROCEDURE CODE": _show_parser("PROCEDURE CODE", target=True),
    +239            "PROCEDURE STATUS": _show_parser("PROCEDURE STATUS"),
    +240            "PRIVILEGES": _show_parser("PRIVILEGES"),
    +241            "FULL PROCESSLIST": _show_parser("PROCESSLIST", full=True),
    +242            "PROCESSLIST": _show_parser("PROCESSLIST"),
    +243            "PROFILE": _show_parser("PROFILE"),
    +244            "PROFILES": _show_parser("PROFILES"),
    +245            "RELAYLOG EVENTS": _show_parser("RELAYLOG EVENTS"),
    +246            "REPLICAS": _show_parser("REPLICAS"),
    +247            "SLAVE HOSTS": _show_parser("REPLICAS"),
    +248            "REPLICA STATUS": _show_parser("REPLICA STATUS"),
    +249            "SLAVE STATUS": _show_parser("REPLICA STATUS"),
    +250            "GLOBAL STATUS": _show_parser("STATUS", global_=True),
    +251            "SESSION STATUS": _show_parser("STATUS"),
    +252            "STATUS": _show_parser("STATUS"),
    +253            "TABLE STATUS": _show_parser("TABLE STATUS"),
    +254            "FULL TABLES": _show_parser("TABLES", full=True),
    +255            "TABLES": _show_parser("TABLES"),
    +256            "TRIGGERS": _show_parser("TRIGGERS"),
    +257            "GLOBAL VARIABLES": _show_parser("VARIABLES", global_=True),
    +258            "SESSION VARIABLES": _show_parser("VARIABLES"),
    +259            "VARIABLES": _show_parser("VARIABLES"),
    +260            "WARNINGS": _show_parser("WARNINGS"),
    +261        }
    +262
    +263        SET_PARSERS = {
    +264            **parser.Parser.SET_PARSERS,
    +265            "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"),
    +266            "PERSIST_ONLY": lambda self: self._parse_set_item_assignment("PERSIST_ONLY"),
    +267            "CHARACTER SET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
    +268            "CHARSET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
    +269            "NAMES": lambda self: self._parse_set_item_names(),
    +270        }
    +271
    +272        PROFILE_TYPES = {
    +273            "ALL",
    +274            "BLOCK IO",
    +275            "CONTEXT SWITCHES",
    +276            "CPU",
    +277            "IPC",
    +278            "MEMORY",
    +279            "PAGE FAULTS",
    +280            "SOURCE",
    +281            "SWAPS",
    +282        }
    +283
    +284        TYPE_TOKENS = {
    +285            *parser.Parser.TYPE_TOKENS,
    +286            TokenType.SET,
    +287        }
    +288
    +289        ENUM_TYPE_TOKENS = {
    +290            *parser.Parser.ENUM_TYPE_TOKENS,
    +291            TokenType.SET,
    +292        }
    +293
    +294        LOG_DEFAULTS_TO_LN = True
    +295
    +296        def _parse_show_mysql(
    +297            self,
    +298            this: str,
    +299            target: bool | str = False,
    +300            full: t.Optional[bool] = None,
    +301            global_: t.Optional[bool] = None,
    +302        ) -> exp.Show:
    +303            if target:
    +304                if isinstance(target, str):
    +305                    self._match_text_seq(target)
    +306                target_id = self._parse_id_var()
    +307            else:
    +308                target_id = None
    +309
    +310            log = self._parse_string() if self._match_text_seq("IN") else None
    +311
    +312            if this in {"BINLOG EVENTS", "RELAYLOG EVENTS"}:
    +313                position = self._parse_number() if self._match_text_seq("FROM") else None
    +314                db = None
    +315            else:
    +316                position = None
    +317                db = None
     318
    -319            if this == "PROFILE":
    -320                types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES))
    -321                query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None
    -322                offset = self._parse_number() if self._match_text_seq("OFFSET") else None
    -323                limit = self._parse_number() if self._match_text_seq("LIMIT") else None
    -324            else:
    -325                types, query = None, None
    -326                offset, limit = self._parse_oldstyle_limit()
    -327
    -328            mutex = True if self._match_text_seq("MUTEX") else None
    -329            mutex = False if self._match_text_seq("STATUS") else mutex
    -330
    -331            return self.expression(
    -332                exp.Show,
    -333                this=this,
    -334                target=target_id,
    -335                full=full,
    -336                log=log,
    -337                position=position,
    -338                db=db,
    -339                channel=channel,
    -340                like=like,
    -341                where=where,
    -342                types=types,
    -343                query=query,
    -344                offset=offset,
    -345                limit=limit,
    -346                mutex=mutex,
    -347                **{"global": global_},  # type: ignore
    -348            )
    -349
    -350        def _parse_oldstyle_limit(
    -351            self,
    -352        ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]:
    -353            limit = None
    -354            offset = None
    -355            if self._match_text_seq("LIMIT"):
    -356                parts = self._parse_csv(self._parse_number)
    -357                if len(parts) == 1:
    -358                    limit = parts[0]
    -359                elif len(parts) == 2:
    -360                    limit = parts[1]
    -361                    offset = parts[0]
    -362
    -363            return offset, limit
    -364
    -365        def _parse_set_item_charset(self, kind: str) -> exp.Expression:
    -366            this = self._parse_string() or self._parse_id_var()
    -367            return self.expression(exp.SetItem, this=this, kind=kind)
    -368
    -369        def _parse_set_item_names(self) -> exp.Expression:
    -370            charset = self._parse_string() or self._parse_id_var()
    -371            if self._match_text_seq("COLLATE"):
    -372                collate = self._parse_string() or self._parse_id_var()
    -373            else:
    -374                collate = None
    +319                if self._match(TokenType.FROM):
    +320                    db = self._parse_id_var()
    +321                elif self._match(TokenType.DOT):
    +322                    db = target_id
    +323                    target_id = self._parse_id_var()
    +324
    +325            channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None
    +326
    +327            like = self._parse_string() if self._match_text_seq("LIKE") else None
    +328            where = self._parse_where()
    +329
    +330            if this == "PROFILE":
    +331                types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES))
    +332                query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None
    +333                offset = self._parse_number() if self._match_text_seq("OFFSET") else None
    +334                limit = self._parse_number() if self._match_text_seq("LIMIT") else None
    +335            else:
    +336                types, query = None, None
    +337                offset, limit = self._parse_oldstyle_limit()
    +338
    +339            mutex = True if self._match_text_seq("MUTEX") else None
    +340            mutex = False if self._match_text_seq("STATUS") else mutex
    +341
    +342            return self.expression(
    +343                exp.Show,
    +344                this=this,
    +345                target=target_id,
    +346                full=full,
    +347                log=log,
    +348                position=position,
    +349                db=db,
    +350                channel=channel,
    +351                like=like,
    +352                where=where,
    +353                types=types,
    +354                query=query,
    +355                offset=offset,
    +356                limit=limit,
    +357                mutex=mutex,
    +358                **{"global": global_},  # type: ignore
    +359            )
    +360
    +361        def _parse_oldstyle_limit(
    +362            self,
    +363        ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]:
    +364            limit = None
    +365            offset = None
    +366            if self._match_text_seq("LIMIT"):
    +367                parts = self._parse_csv(self._parse_number)
    +368                if len(parts) == 1:
    +369                    limit = parts[0]
    +370                elif len(parts) == 2:
    +371                    limit = parts[1]
    +372                    offset = parts[0]
    +373
    +374            return offset, limit
     375
    -376            return self.expression(
    -377                exp.SetItem,
    -378                this=charset,
    -379                collate=collate,
    -380                kind="NAMES",
    -381            )
    +376        def _parse_set_item_charset(self, kind: str) -> exp.Expression:
    +377            this = self._parse_string() or self._parse_id_var()
    +378            return self.expression(exp.SetItem, this=this, kind=kind)
    +379
    +380        def _parse_set_item_names(self) -> exp.Expression:
    +381            charset = self._parse_string() or self._parse_id_var()
    +382            if self._match_text_seq("COLLATE"):
    +383                collate = self._parse_string() or self._parse_id_var()
    +384            else:
    +385                collate = None
    +386
    +387            return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES")
     
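A small sketch of the SET NAMES handling wired up through SET_PARSERS and _parse_set_item_names above (hedged: the charset and collation values are illustrative):

    import sqlglot

    # SET NAMES with an optional COLLATE clause is parsed into a SetItem with
    # kind="NAMES" and printed back out by the generator.
    ast = sqlglot.parse_one("SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci", read="mysql")
    print(ast.sql(dialect="mysql"))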
    -

    Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

    +

    Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

    Arguments:
    • - error_level: the desired error level. Default: ErrorLevel.IMMEDIATE
    • + error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
    • - error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
    • + error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
    • - index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
    • - alias_post_tablesample: If the table alias comes after tablesample. Default: False
    • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
    • - null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
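A hedged sketch of how the options above reach this Parser in practice: keyword arguments given to the top-level helpers are forwarded to the dialect's Parser, so they can be set per call (values are illustrative):

    import sqlglot
    from sqlglot.errors import ErrorLevel

    # error_level, error_message_context and max_errors are forwarded to the
    # MySQL Parser; a malformed statement would raise ParseError here.
    ast = sqlglot.parse_one(
        "SHOW FULL TABLES FROM mydb LIKE 't%'",
        read="mysql",
        error_level=ErrorLevel.RAISE,
        error_message_context=100,
        max_errors=3,
    )
    print(ast.sql(dialect="mysql"))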
    @@ -1352,156 +1359,139 @@ Default: "nulls_are_small"
    -
    383    class Generator(generator.Generator):
    -384        LOCKING_READS_SUPPORTED = True
    -385        NULL_ORDERING_SUPPORTED = False
    -386        JOIN_HINTS = False
    -387        TABLE_HINTS = False
    -388
    -389        TRANSFORMS = {
    -390            **generator.Generator.TRANSFORMS,
    -391            exp.CurrentDate: no_paren_current_date_sql,
    -392            exp.DateDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression),
    -393            exp.DateAdd: _date_add_sql("ADD"),
    -394            exp.DateStrToDate: datestrtodate_sql,
    -395            exp.DateSub: _date_add_sql("SUB"),
    -396            exp.DateTrunc: _date_trunc_sql,
    -397            exp.DayOfMonth: rename_func("DAYOFMONTH"),
    -398            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    -399            exp.DayOfYear: rename_func("DAYOFYEAR"),
    -400            exp.GroupConcat: lambda self, e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""",
    -401            exp.ILike: no_ilike_sql,
    -402            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    -403            exp.Max: max_or_greatest,
    -404            exp.Min: min_or_least,
    -405            exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
    -406            exp.NullSafeNEQ: lambda self, e: self.not_sql(self.binary(e, "<=>")),
    -407            exp.Pivot: no_pivot_sql,
    -408            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -409            exp.StrPosition: strposition_to_locate_sql,
    -410            exp.StrToDate: _str_to_date_sql,
    -411            exp.StrToTime: _str_to_date_sql,
    -412            exp.TableSample: no_tablesample_sql,
    -413            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    -414            exp.TimeToStr: lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)),
    -415            exp.Trim: _trim_sql,
    -416            exp.TryCast: no_trycast_sql,
    -417            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
    -418        }
    -419
    -420        TYPE_MAPPING = generator.Generator.TYPE_MAPPING.copy()
    -421        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)
    -422        TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)
    -423        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)
    -424        TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)
    +            
    389    class Generator(generator.Generator):
    +390        LOCKING_READS_SUPPORTED = True
    +391        NULL_ORDERING_SUPPORTED = False
    +392        JOIN_HINTS = False
    +393        TABLE_HINTS = False
    +394
    +395        TRANSFORMS = {
    +396            **generator.Generator.TRANSFORMS,
    +397            exp.CurrentDate: no_paren_current_date_sql,
    +398            exp.DateDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression),
    +399            exp.DateAdd: _date_add_sql("ADD"),
    +400            exp.DateStrToDate: datestrtodate_sql,
    +401            exp.DateSub: _date_add_sql("SUB"),
    +402            exp.DateTrunc: _date_trunc_sql,
    +403            exp.DayOfMonth: rename_func("DAYOFMONTH"),
    +404            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    +405            exp.DayOfYear: rename_func("DAYOFYEAR"),
    +406            exp.GroupConcat: lambda self, e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""",
    +407            exp.ILike: no_ilike_sql,
    +408            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    +409            exp.Max: max_or_greatest,
    +410            exp.Min: min_or_least,
    +411            exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
    +412            exp.NullSafeNEQ: lambda self, e: self.not_sql(self.binary(e, "<=>")),
    +413            exp.Pivot: no_pivot_sql,
    +414            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +415            exp.StrPosition: strposition_to_locate_sql,
    +416            exp.StrToDate: _str_to_date_sql,
    +417            exp.StrToTime: _str_to_date_sql,
    +418            exp.TableSample: no_tablesample_sql,
    +419            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    +420            exp.TimeToStr: lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)),
    +421            exp.Trim: _trim_sql,
    +422            exp.TryCast: no_trycast_sql,
    +423            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
    +424        }
     425
    -426        PROPERTIES_LOCATION = {
    -427            **generator.Generator.PROPERTIES_LOCATION,
    -428            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    -429            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -430        }
    +426        TYPE_MAPPING = generator.Generator.TYPE_MAPPING.copy()
    +427        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)
    +428        TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)
    +429        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)
    +430        TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)
     431
    -432        LIMIT_FETCH = "LIMIT"
    -433
    -434        def show_sql(self, expression: exp.Show) -> str:
    -435            this = f" {expression.name}"
    -436            full = " FULL" if expression.args.get("full") else ""
    -437            global_ = " GLOBAL" if expression.args.get("global") else ""
    -438
    -439            target = self.sql(expression, "target")
    -440            target = f" {target}" if target else ""
    -441            if expression.name in {"COLUMNS", "INDEX"}:
    -442                target = f" FROM{target}"
    -443            elif expression.name == "GRANTS":
    -444                target = f" FOR{target}"
    -445
    -446            db = self._prefixed_sql("FROM", expression, "db")
    -447
    -448            like = self._prefixed_sql("LIKE", expression, "like")
    -449            where = self.sql(expression, "where")
    -450
    -451            types = self.expressions(expression, key="types")
    -452            types = f" {types}" if types else types
    -453            query = self._prefixed_sql("FOR QUERY", expression, "query")
    -454
    -455            if expression.name == "PROFILE":
    -456                offset = self._prefixed_sql("OFFSET", expression, "offset")
    -457                limit = self._prefixed_sql("LIMIT", expression, "limit")
    -458            else:
    -459                offset = ""
    -460                limit = self._oldstyle_limit_sql(expression)
    -461
    -462            log = self._prefixed_sql("IN", expression, "log")
    -463            position = self._prefixed_sql("FROM", expression, "position")
    -464
    -465            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
    -466
    -467            if expression.name == "ENGINE":
    -468                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
    -469            else:
    -470                mutex_or_status = ""
    -471
    -472            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
    -473
    -474        def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str:
    -475            sql = self.sql(expression, arg)
    -476            if not sql:
    -477                return ""
    -478            return f" {prefix} {sql}"
    +432        PROPERTIES_LOCATION = {
    +433            **generator.Generator.PROPERTIES_LOCATION,
    +434            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    +435            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +436        }
    +437
    +438        LIMIT_FETCH = "LIMIT"
    +439
    +440        def show_sql(self, expression: exp.Show) -> str:
    +441            this = f" {expression.name}"
    +442            full = " FULL" if expression.args.get("full") else ""
    +443            global_ = " GLOBAL" if expression.args.get("global") else ""
    +444
    +445            target = self.sql(expression, "target")
    +446            target = f" {target}" if target else ""
    +447            if expression.name in {"COLUMNS", "INDEX"}:
    +448                target = f" FROM{target}"
    +449            elif expression.name == "GRANTS":
    +450                target = f" FOR{target}"
    +451
    +452            db = self._prefixed_sql("FROM", expression, "db")
    +453
    +454            like = self._prefixed_sql("LIKE", expression, "like")
    +455            where = self.sql(expression, "where")
    +456
    +457            types = self.expressions(expression, key="types")
    +458            types = f" {types}" if types else types
    +459            query = self._prefixed_sql("FOR QUERY", expression, "query")
    +460
    +461            if expression.name == "PROFILE":
    +462                offset = self._prefixed_sql("OFFSET", expression, "offset")
    +463                limit = self._prefixed_sql("LIMIT", expression, "limit")
    +464            else:
    +465                offset = ""
    +466                limit = self._oldstyle_limit_sql(expression)
    +467
    +468            log = self._prefixed_sql("IN", expression, "log")
    +469            position = self._prefixed_sql("FROM", expression, "position")
    +470
    +471            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
    +472
    +473            if expression.name == "ENGINE":
    +474                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
    +475            else:
    +476                mutex_or_status = ""
    +477
    +478            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
     479
    -480        def _oldstyle_limit_sql(self, expression: exp.Show) -> str:
    -481            limit = self.sql(expression, "limit")
    -482            offset = self.sql(expression, "offset")
    -483            if limit:
    -484                limit_offset = f"{offset}, {limit}" if offset else limit
    -485                return f" LIMIT {limit_offset}"
    -486            return ""
    +480        def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str:
    +481            sql = self.sql(expression, arg)
    +482            return f" {prefix} {sql}" if sql else ""
    +483
    +484        def _oldstyle_limit_sql(self, expression: exp.Show) -> str:
    +485            limit = self.sql(expression, "limit")
    +486            offset = self.sql(expression, "offset")
    +487            if limit:
    +488                limit_offset = f"{offset}, {limit}" if offset else limit
    +489                return f" LIMIT {limit_offset}"
    +490            return ""
     
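Because the MySQL Generator pops MEDIUMTEXT, LONGTEXT, MEDIUMBLOB and LONGBLOB out of the inherited TYPE_MAPPING, those column types are emitted verbatim when generating MySQL instead of being rewritten by the base Generator's TEXT/BLOB mappings. A hedged sketch (the DDL is illustrative):

    import sqlglot

    ddl = "CREATE TABLE t (a MEDIUMTEXT, b LONGBLOB)"

    # MySQL -> MySQL keeps the native types because they are no longer in TYPE_MAPPING.
    print(sqlglot.transpile(ddl, read="mysql", write="mysql")[0])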
    -

    Generator interprets the given syntax tree and produces a SQL string as an output.

    +

    Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
    • - time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
    • - time_trie (trie): a trie of the time_mapping keys
    • - pretty (bool): if set to True the returned string will be formatted. Default: False.
    • - quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    • - quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    • - identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    • - identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    • - bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    • - bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    • - hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    • - hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    • - byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    • - byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    • - raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    • - raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    • - identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    • - normalize (bool): if set to True all identifiers will lower cased
    • - string_escape (str): specifies a string escape character. Default: '.
    • - identifier_escape (str): specifies an identifier escape character. Default: ".
    • - pad (int): determines padding in a formatted string. Default: 2.
    • - indent (int): determines the size of indentation in a formatted string. Default: 4.
    • - unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    • - normalize_functions (str): normalize function names, "upper", "lower", or None. Default: "upper"
    • - alias_post_tablesample (bool): if the table alias comes after tablesample. Default: False
    • - identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit. Default: False
    • - unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
    • - null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
    • - max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
    • + pretty: Whether or not to format the produced SQL string. Default: False.
    • + identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
    • + normalize: Whether or not to normalize identifiers to lowercase. Default: False.
    • + pad: Determines the pad size in a formatted string. Default: 2.
    • + indent: Determines the indentation size in a formatted string. Default: 2.
    • + normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
    • + unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
    • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    • - leading_comma (bool): if the the comma is leading or trailing in select statements
    • + leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
    • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
    @@ -1524,45 +1514,45 @@ Default: True
    -
    434        def show_sql(self, expression: exp.Show) -> str:
    -435            this = f" {expression.name}"
    -436            full = " FULL" if expression.args.get("full") else ""
    -437            global_ = " GLOBAL" if expression.args.get("global") else ""
    -438
    -439            target = self.sql(expression, "target")
    -440            target = f" {target}" if target else ""
    -441            if expression.name in {"COLUMNS", "INDEX"}:
    -442                target = f" FROM{target}"
    -443            elif expression.name == "GRANTS":
    -444                target = f" FOR{target}"
    -445
    -446            db = self._prefixed_sql("FROM", expression, "db")
    -447
    -448            like = self._prefixed_sql("LIKE", expression, "like")
    -449            where = self.sql(expression, "where")
    -450
    -451            types = self.expressions(expression, key="types")
    -452            types = f" {types}" if types else types
    -453            query = self._prefixed_sql("FOR QUERY", expression, "query")
    -454
    -455            if expression.name == "PROFILE":
    -456                offset = self._prefixed_sql("OFFSET", expression, "offset")
    -457                limit = self._prefixed_sql("LIMIT", expression, "limit")
    -458            else:
    -459                offset = ""
    -460                limit = self._oldstyle_limit_sql(expression)
    -461
    -462            log = self._prefixed_sql("IN", expression, "log")
    -463            position = self._prefixed_sql("FROM", expression, "position")
    -464
    -465            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
    -466
    -467            if expression.name == "ENGINE":
    -468                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
    -469            else:
    -470                mutex_or_status = ""
    -471
    -472            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
    +            
    440        def show_sql(self, expression: exp.Show) -> str:
    +441            this = f" {expression.name}"
    +442            full = " FULL" if expression.args.get("full") else ""
    +443            global_ = " GLOBAL" if expression.args.get("global") else ""
    +444
    +445            target = self.sql(expression, "target")
    +446            target = f" {target}" if target else ""
    +447            if expression.name in {"COLUMNS", "INDEX"}:
    +448                target = f" FROM{target}"
    +449            elif expression.name == "GRANTS":
    +450                target = f" FOR{target}"
    +451
    +452            db = self._prefixed_sql("FROM", expression, "db")
    +453
    +454            like = self._prefixed_sql("LIKE", expression, "like")
    +455            where = self.sql(expression, "where")
    +456
    +457            types = self.expressions(expression, key="types")
    +458            types = f" {types}" if types else types
    +459            query = self._prefixed_sql("FOR QUERY", expression, "query")
    +460
    +461            if expression.name == "PROFILE":
    +462                offset = self._prefixed_sql("OFFSET", expression, "offset")
    +463                limit = self._prefixed_sql("LIMIT", expression, "limit")
    +464            else:
    +465                offset = ""
    +466                limit = self._oldstyle_limit_sql(expression)
    +467
    +468            log = self._prefixed_sql("IN", expression, "log")
    +469            position = self._prefixed_sql("FROM", expression, "position")
    +470
    +471            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
    +472
    +473            if expression.name == "ENGINE":
    +474                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
    +475            else:
    +476                mutex_or_status = ""
    +477
    +478            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
     
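The show_sql method above re-assembles MySQL-style SHOW statements from an exp.Show node, attaching the FULL flag, the FROM target, and the LIKE/WHERE clauses around the SHOW keyword. A minimal round-trip sketch, assuming this hunk belongs to the MySQL dialect (which defines show_sql) and that the statement survives the round trip unchanged:

    import sqlglot

    # Parse a MySQL SHOW statement and generate it back; show_sql rebuilds
    # the FULL flag, the FROM target, and the LIKE filter around SHOW.
    expression = sqlglot.parse_one("SHOW FULL COLUMNS FROM users LIKE 'id%'", read="mysql")
    print(expression.sql(dialect="mysql"))
    # expected (not verified here): SHOW FULL COLUMNS FROM users LIKE 'id%'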
    @@ -1598,6 +1588,7 @@ Default: True
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +
    createable_sql
    create_sql
    clone_sql
    describe_sql
    @@ -1680,10 +1671,12 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_having_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -1708,7 +1701,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    +
    safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -1759,6 +1752,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -1807,6 +1801,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
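The Generator arguments documented earlier in this section (pretty, identify, normalize, normalize_functions, and so on) are normally supplied through the top-level API rather than by constructing a Generator directly. A minimal sketch, assuming the 16.x keyword names listed above and that transpile() forwards these options to the Generator:

    import sqlglot

    sql = "select coalesce(a, b) as result from my_db.tbl"

    # pretty, identify and normalize_functions are forwarded from transpile()
    # down to the Generator; names and defaults follow the argument list above.
    print(
        sqlglot.transpile(
            sql,
            write="mysql",
            pretty=True,                  # format the output across multiple lines
            identify=True,                # quote every identifier
            normalize_functions="upper",  # upper-case function names (the default)
        )[0]
    )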
    diff --git a/docs/sqlglot/dialects/oracle.html b/docs/sqlglot/dialects/oracle.html index 2ef666e..7258af1 100644 --- a/docs/sqlglot/dialects/oracle.html +++ b/docs/sqlglot/dialects/oracle.html @@ -112,167 +112,160 @@ 24 if self._match_text_seq("COLUMNS"): 25 columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True))) 26 - 27 return self.expression( - 28 exp.XMLTable, - 29 this=this, - 30 passing=passing, - 31 columns=columns, - 32 by_ref=by_ref, - 33 ) - 34 - 35 - 36class Oracle(Dialect): - 37 alias_post_tablesample = True - 38 - 39 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212 - 40 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes - 41 time_mapping = { - 42 "AM": "%p", # Meridian indicator with or without periods - 43 "A.M.": "%p", # Meridian indicator with or without periods - 44 "PM": "%p", # Meridian indicator with or without periods - 45 "P.M.": "%p", # Meridian indicator with or without periods - 46 "D": "%u", # Day of week (1-7) - 47 "DAY": "%A", # name of day - 48 "DD": "%d", # day of month (1-31) - 49 "DDD": "%j", # day of year (1-366) - 50 "DY": "%a", # abbreviated name of day - 51 "HH": "%I", # Hour of day (1-12) - 52 "HH12": "%I", # alias for HH - 53 "HH24": "%H", # Hour of day (0-23) - 54 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard - 55 "MI": "%M", # Minute (0-59) - 56 "MM": "%m", # Month (01-12; January = 01) - 57 "MON": "%b", # Abbreviated name of month - 58 "MONTH": "%B", # Name of month - 59 "SS": "%S", # Second (0-59) - 60 "WW": "%W", # Week of year (1-53) - 61 "YY": "%y", # 15 - 62 "YYYY": "%Y", # 2015 - 63 } - 64 - 65 class Parser(parser.Parser): - 66 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP} - 67 - 68 FUNCTIONS = { - 69 **parser.Parser.FUNCTIONS, - 70 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), - 71 } - 72 - 73 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { - 74 **parser.Parser.FUNCTION_PARSERS, - 75 "XMLTABLE": _parse_xml_table, + 27 return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref) + 28 + 29 + 30class Oracle(Dialect): + 31 ALIAS_POST_TABLESAMPLE = True + 32 + 33 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212 + 34 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes + 35 TIME_MAPPING = { + 36 "AM": "%p", # Meridian indicator with or without periods + 37 "A.M.": "%p", # Meridian indicator with or without periods + 38 "PM": "%p", # Meridian indicator with or without periods + 39 "P.M.": "%p", # Meridian indicator with or without periods + 40 "D": "%u", # Day of week (1-7) + 41 "DAY": "%A", # name of day + 42 "DD": "%d", # day of month (1-31) + 43 "DDD": "%j", # day of year (1-366) + 44 "DY": "%a", # abbreviated name of day + 45 "HH": "%I", # Hour of day (1-12) + 46 "HH12": "%I", # alias for HH + 47 "HH24": "%H", # Hour of day (0-23) + 48 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard + 49 "MI": "%M", # Minute (0-59) + 50 "MM": "%m", # Month (01-12; January = 01) + 51 "MON": "%b", # Abbreviated name of month + 52 "MONTH": "%B", # Name of month + 53 "SS": "%S", # Second (0-59) + 54 "WW": "%W", # Week of year (1-53) + 55 "YY": "%y", # 15 + 56 "YYYY": "%Y", # 2015 + 57 } + 58 + 59 class Parser(parser.Parser): + 60 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP} + 61 + 62 FUNCTIONS = { + 63 
**parser.Parser.FUNCTIONS, + 64 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), + 65 } + 66 + 67 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { + 68 **parser.Parser.FUNCTION_PARSERS, + 69 "XMLTABLE": _parse_xml_table, + 70 } + 71 + 72 TYPE_LITERAL_PARSERS = { + 73 exp.DataType.Type.DATE: lambda self, this, _: self.expression( + 74 exp.DateStrToDate, this=this + 75 ) 76 } 77 - 78 TYPE_LITERAL_PARSERS = { - 79 exp.DataType.Type.DATE: lambda self, this, _: self.expression( - 80 exp.DateStrToDate, this=this - 81 ) - 82 } + 78 def _parse_column(self) -> t.Optional[exp.Expression]: + 79 column = super()._parse_column() + 80 if column: + 81 column.set("join_mark", self._match(TokenType.JOIN_MARKER)) + 82 return column 83 - 84 def _parse_column(self) -> t.Optional[exp.Expression]: - 85 column = super()._parse_column() - 86 if column: - 87 column.set("join_mark", self._match(TokenType.JOIN_MARKER)) - 88 return column + 84 def _parse_hint(self) -> t.Optional[exp.Hint]: + 85 if self._match(TokenType.HINT): + 86 start = self._curr + 87 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH): + 88 self._advance() 89 - 90 def _parse_hint(self) -> t.Optional[exp.Expression]: - 91 if self._match(TokenType.HINT): - 92 start = self._curr - 93 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH): - 94 self._advance() + 90 if not self._curr: + 91 self.raise_error("Expected */ after HINT") + 92 + 93 end = self._tokens[self._index - 3] + 94 return exp.Hint(expressions=[self._find_sql(start, end)]) 95 - 96 if not self._curr: - 97 self.raise_error("Expected */ after HINT") - 98 - 99 end = self._tokens[self._index - 3] -100 return exp.Hint(expressions=[self._find_sql(start, end)]) -101 -102 return None -103 -104 class Generator(generator.Generator): -105 LOCKING_READS_SUPPORTED = True -106 JOIN_HINTS = False -107 TABLE_HINTS = False -108 -109 TYPE_MAPPING = { -110 **generator.Generator.TYPE_MAPPING, -111 exp.DataType.Type.TINYINT: "NUMBER", -112 exp.DataType.Type.SMALLINT: "NUMBER", -113 exp.DataType.Type.INT: "NUMBER", -114 exp.DataType.Type.BIGINT: "NUMBER", -115 exp.DataType.Type.DECIMAL: "NUMBER", -116 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", -117 exp.DataType.Type.VARCHAR: "VARCHAR2", -118 exp.DataType.Type.NVARCHAR: "NVARCHAR2", -119 exp.DataType.Type.TEXT: "CLOB", -120 exp.DataType.Type.BINARY: "BLOB", -121 exp.DataType.Type.VARBINARY: "BLOB", -122 } -123 -124 TRANSFORMS = { -125 **generator.Generator.TRANSFORMS, -126 exp.DateStrToDate: lambda self, e: self.func( -127 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD") -128 ), -129 exp.Group: transforms.preprocess([transforms.unalias_group]), -130 exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */", -131 exp.ILike: no_ilike_sql, -132 exp.IfNull: rename_func("NVL"), -133 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), -134 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", -135 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "), -136 exp.Substring: rename_func("SUBSTR"), -137 exp.Table: lambda self, e: self.table_sql(e, sep=" "), -138 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "), -139 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", -140 exp.ToChar: lambda self, e: self.function_fallback_sql(e), -141 exp.Trim: trim_sql, -142 exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 
86400)", -143 } -144 -145 PROPERTIES_LOCATION = { -146 **generator.Generator.PROPERTIES_LOCATION, -147 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -148 } -149 -150 LIMIT_FETCH = "FETCH" -151 -152 def offset_sql(self, expression: exp.Offset) -> str: -153 return f"{super().offset_sql(expression)} ROWS" -154 -155 def column_sql(self, expression: exp.Column) -> str: -156 column = super().column_sql(expression) -157 return f"{column} (+)" if expression.args.get("join_mark") else column -158 -159 def xmltable_sql(self, expression: exp.XMLTable) -> str: -160 this = self.sql(expression, "this") -161 passing = self.expressions(expression, key="passing") -162 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else "" -163 columns = self.expressions(expression, key="columns") -164 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else "" -165 by_ref = ( -166 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else "" -167 ) -168 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}" -169 -170 class Tokenizer(tokens.Tokenizer): -171 VAR_SINGLE_TOKENS = {"@"} -172 -173 KEYWORDS = { -174 **tokens.Tokenizer.KEYWORDS, -175 "(+)": TokenType.JOIN_MARKER, -176 "BINARY_DOUBLE": TokenType.DOUBLE, -177 "BINARY_FLOAT": TokenType.FLOAT, -178 "COLUMNS": TokenType.COLUMN, -179 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, -180 "MINUS": TokenType.EXCEPT, -181 "NVARCHAR2": TokenType.NVARCHAR, -182 "RETURNING": TokenType.RETURNING, -183 "SAMPLE": TokenType.TABLE_SAMPLE, -184 "START": TokenType.BEGIN, -185 "TOP": TokenType.TOP, -186 "VARCHAR2": TokenType.VARCHAR, -187 } + 96 return None + 97 + 98 class Generator(generator.Generator): + 99 LOCKING_READS_SUPPORTED = True +100 JOIN_HINTS = False +101 TABLE_HINTS = False +102 +103 TYPE_MAPPING = { +104 **generator.Generator.TYPE_MAPPING, +105 exp.DataType.Type.TINYINT: "NUMBER", +106 exp.DataType.Type.SMALLINT: "NUMBER", +107 exp.DataType.Type.INT: "NUMBER", +108 exp.DataType.Type.BIGINT: "NUMBER", +109 exp.DataType.Type.DECIMAL: "NUMBER", +110 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", +111 exp.DataType.Type.VARCHAR: "VARCHAR2", +112 exp.DataType.Type.NVARCHAR: "NVARCHAR2", +113 exp.DataType.Type.TEXT: "CLOB", +114 exp.DataType.Type.BINARY: "BLOB", +115 exp.DataType.Type.VARBINARY: "BLOB", +116 } +117 +118 TRANSFORMS = { +119 **generator.Generator.TRANSFORMS, +120 exp.DateStrToDate: lambda self, e: self.func( +121 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD") +122 ), +123 exp.Group: transforms.preprocess([transforms.unalias_group]), +124 exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */", +125 exp.ILike: no_ilike_sql, +126 exp.Coalesce: rename_func("NVL"), +127 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), +128 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", +129 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "), +130 exp.Substring: rename_func("SUBSTR"), +131 exp.Table: lambda self, e: self.table_sql(e, sep=" "), +132 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "), +133 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", +134 exp.ToChar: lambda self, e: self.function_fallback_sql(e), +135 exp.Trim: trim_sql, +136 exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)", +137 } +138 +139 PROPERTIES_LOCATION = { +140 
**generator.Generator.PROPERTIES_LOCATION, +141 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +142 } +143 +144 LIMIT_FETCH = "FETCH" +145 +146 def offset_sql(self, expression: exp.Offset) -> str: +147 return f"{super().offset_sql(expression)} ROWS" +148 +149 def column_sql(self, expression: exp.Column) -> str: +150 column = super().column_sql(expression) +151 return f"{column} (+)" if expression.args.get("join_mark") else column +152 +153 def xmltable_sql(self, expression: exp.XMLTable) -> str: +154 this = self.sql(expression, "this") +155 passing = self.expressions(expression, key="passing") +156 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else "" +157 columns = self.expressions(expression, key="columns") +158 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else "" +159 by_ref = ( +160 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else "" +161 ) +162 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}" +163 +164 class Tokenizer(tokens.Tokenizer): +165 VAR_SINGLE_TOKENS = {"@"} +166 +167 KEYWORDS = { +168 **tokens.Tokenizer.KEYWORDS, +169 "(+)": TokenType.JOIN_MARKER, +170 "BINARY_DOUBLE": TokenType.DOUBLE, +171 "BINARY_FLOAT": TokenType.FLOAT, +172 "COLUMNS": TokenType.COLUMN, +173 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, +174 "MINUS": TokenType.EXCEPT, +175 "NVARCHAR2": TokenType.NVARCHAR, +176 "SAMPLE": TokenType.TABLE_SAMPLE, +177 "START": TokenType.BEGIN, +178 "TOP": TokenType.TOP, +179 "VARCHAR2": TokenType.VARCHAR, +180 }
    @@ -288,158 +281,157 @@
    -
     37class Oracle(Dialect):
    - 38    alias_post_tablesample = True
    - 39
    - 40    # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
    - 41    # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
    - 42    time_mapping = {
    - 43        "AM": "%p",  # Meridian indicator with or without periods
    - 44        "A.M.": "%p",  # Meridian indicator with or without periods
    - 45        "PM": "%p",  # Meridian indicator with or without periods
    - 46        "P.M.": "%p",  # Meridian indicator with or without periods
    - 47        "D": "%u",  # Day of week (1-7)
    - 48        "DAY": "%A",  # name of day
    - 49        "DD": "%d",  # day of month (1-31)
    - 50        "DDD": "%j",  # day of year (1-366)
    - 51        "DY": "%a",  # abbreviated name of day
    - 52        "HH": "%I",  # Hour of day (1-12)
    - 53        "HH12": "%I",  # alias for HH
    - 54        "HH24": "%H",  # Hour of day (0-23)
    - 55        "IW": "%V",  # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
    - 56        "MI": "%M",  # Minute (0-59)
    - 57        "MM": "%m",  # Month (01-12; January = 01)
    - 58        "MON": "%b",  # Abbreviated name of month
    - 59        "MONTH": "%B",  # Name of month
    - 60        "SS": "%S",  # Second (0-59)
    - 61        "WW": "%W",  # Week of year (1-53)
    - 62        "YY": "%y",  # 15
    - 63        "YYYY": "%Y",  # 2015
    - 64    }
    - 65
    - 66    class Parser(parser.Parser):
    - 67        WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
    - 68
    - 69        FUNCTIONS = {
    - 70            **parser.Parser.FUNCTIONS,
    - 71            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    - 72        }
    - 73
    - 74        FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
    - 75            **parser.Parser.FUNCTION_PARSERS,
    - 76            "XMLTABLE": _parse_xml_table,
    +            
     31class Oracle(Dialect):
    + 32    ALIAS_POST_TABLESAMPLE = True
    + 33
    + 34    # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
    + 35    # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
    + 36    TIME_MAPPING = {
    + 37        "AM": "%p",  # Meridian indicator with or without periods
    + 38        "A.M.": "%p",  # Meridian indicator with or without periods
    + 39        "PM": "%p",  # Meridian indicator with or without periods
    + 40        "P.M.": "%p",  # Meridian indicator with or without periods
    + 41        "D": "%u",  # Day of week (1-7)
    + 42        "DAY": "%A",  # name of day
    + 43        "DD": "%d",  # day of month (1-31)
    + 44        "DDD": "%j",  # day of year (1-366)
    + 45        "DY": "%a",  # abbreviated name of day
    + 46        "HH": "%I",  # Hour of day (1-12)
    + 47        "HH12": "%I",  # alias for HH
    + 48        "HH24": "%H",  # Hour of day (0-23)
    + 49        "IW": "%V",  # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
    + 50        "MI": "%M",  # Minute (0-59)
    + 51        "MM": "%m",  # Month (01-12; January = 01)
    + 52        "MON": "%b",  # Abbreviated name of month
    + 53        "MONTH": "%B",  # Name of month
    + 54        "SS": "%S",  # Second (0-59)
    + 55        "WW": "%W",  # Week of year (1-53)
    + 56        "YY": "%y",  # 15
    + 57        "YYYY": "%Y",  # 2015
    + 58    }
    + 59
    + 60    class Parser(parser.Parser):
    + 61        WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
    + 62
    + 63        FUNCTIONS = {
    + 64            **parser.Parser.FUNCTIONS,
    + 65            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    + 66        }
    + 67
    + 68        FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
    + 69            **parser.Parser.FUNCTION_PARSERS,
    + 70            "XMLTABLE": _parse_xml_table,
    + 71        }
    + 72
    + 73        TYPE_LITERAL_PARSERS = {
    + 74            exp.DataType.Type.DATE: lambda self, this, _: self.expression(
    + 75                exp.DateStrToDate, this=this
    + 76            )
      77        }
      78
    - 79        TYPE_LITERAL_PARSERS = {
    - 80            exp.DataType.Type.DATE: lambda self, this, _: self.expression(
    - 81                exp.DateStrToDate, this=this
    - 82            )
    - 83        }
    + 79        def _parse_column(self) -> t.Optional[exp.Expression]:
    + 80            column = super()._parse_column()
    + 81            if column:
    + 82                column.set("join_mark", self._match(TokenType.JOIN_MARKER))
    + 83            return column
      84
    - 85        def _parse_column(self) -> t.Optional[exp.Expression]:
    - 86            column = super()._parse_column()
    - 87            if column:
    - 88                column.set("join_mark", self._match(TokenType.JOIN_MARKER))
    - 89            return column
    + 85        def _parse_hint(self) -> t.Optional[exp.Hint]:
    + 86            if self._match(TokenType.HINT):
    + 87                start = self._curr
    + 88                while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
    + 89                    self._advance()
      90
    - 91        def _parse_hint(self) -> t.Optional[exp.Expression]:
    - 92            if self._match(TokenType.HINT):
    - 93                start = self._curr
    - 94                while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
    - 95                    self._advance()
    + 91                if not self._curr:
    + 92                    self.raise_error("Expected */ after HINT")
    + 93
    + 94                end = self._tokens[self._index - 3]
    + 95                return exp.Hint(expressions=[self._find_sql(start, end)])
      96
    - 97                if not self._curr:
    - 98                    self.raise_error("Expected */ after HINT")
    - 99
    -100                end = self._tokens[self._index - 3]
    -101                return exp.Hint(expressions=[self._find_sql(start, end)])
    -102
    -103            return None
    -104
    -105    class Generator(generator.Generator):
    -106        LOCKING_READS_SUPPORTED = True
    -107        JOIN_HINTS = False
    -108        TABLE_HINTS = False
    -109
    -110        TYPE_MAPPING = {
    -111            **generator.Generator.TYPE_MAPPING,
    -112            exp.DataType.Type.TINYINT: "NUMBER",
    -113            exp.DataType.Type.SMALLINT: "NUMBER",
    -114            exp.DataType.Type.INT: "NUMBER",
    -115            exp.DataType.Type.BIGINT: "NUMBER",
    -116            exp.DataType.Type.DECIMAL: "NUMBER",
    -117            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
    -118            exp.DataType.Type.VARCHAR: "VARCHAR2",
    -119            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
    -120            exp.DataType.Type.TEXT: "CLOB",
    -121            exp.DataType.Type.BINARY: "BLOB",
    -122            exp.DataType.Type.VARBINARY: "BLOB",
    -123        }
    -124
    -125        TRANSFORMS = {
    -126            **generator.Generator.TRANSFORMS,
    -127            exp.DateStrToDate: lambda self, e: self.func(
    -128                "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
    -129            ),
    -130            exp.Group: transforms.preprocess([transforms.unalias_group]),
    -131            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
    -132            exp.ILike: no_ilike_sql,
    -133            exp.IfNull: rename_func("NVL"),
    -134            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -135            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    -136            exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
    -137            exp.Substring: rename_func("SUBSTR"),
    -138            exp.Table: lambda self, e: self.table_sql(e, sep=" "),
    -139            exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
    -140            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    -141            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -142            exp.Trim: trim_sql,
    -143            exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
    -144        }
    -145
    -146        PROPERTIES_LOCATION = {
    -147            **generator.Generator.PROPERTIES_LOCATION,
    -148            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -149        }
    -150
    -151        LIMIT_FETCH = "FETCH"
    -152
    -153        def offset_sql(self, expression: exp.Offset) -> str:
    -154            return f"{super().offset_sql(expression)} ROWS"
    -155
    -156        def column_sql(self, expression: exp.Column) -> str:
    -157            column = super().column_sql(expression)
    -158            return f"{column} (+)" if expression.args.get("join_mark") else column
    -159
    -160        def xmltable_sql(self, expression: exp.XMLTable) -> str:
    -161            this = self.sql(expression, "this")
    -162            passing = self.expressions(expression, key="passing")
    -163            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
    -164            columns = self.expressions(expression, key="columns")
    -165            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
    -166            by_ref = (
    -167                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
    -168            )
    -169            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
    -170
    -171    class Tokenizer(tokens.Tokenizer):
    -172        VAR_SINGLE_TOKENS = {"@"}
    -173
    -174        KEYWORDS = {
    -175            **tokens.Tokenizer.KEYWORDS,
    -176            "(+)": TokenType.JOIN_MARKER,
    -177            "BINARY_DOUBLE": TokenType.DOUBLE,
    -178            "BINARY_FLOAT": TokenType.FLOAT,
    -179            "COLUMNS": TokenType.COLUMN,
    -180            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    -181            "MINUS": TokenType.EXCEPT,
    -182            "NVARCHAR2": TokenType.NVARCHAR,
    -183            "RETURNING": TokenType.RETURNING,
    -184            "SAMPLE": TokenType.TABLE_SAMPLE,
    -185            "START": TokenType.BEGIN,
    -186            "TOP": TokenType.TOP,
    -187            "VARCHAR2": TokenType.VARCHAR,
    -188        }
    + 97            return None
    + 98
    + 99    class Generator(generator.Generator):
    +100        LOCKING_READS_SUPPORTED = True
    +101        JOIN_HINTS = False
    +102        TABLE_HINTS = False
    +103
    +104        TYPE_MAPPING = {
    +105            **generator.Generator.TYPE_MAPPING,
    +106            exp.DataType.Type.TINYINT: "NUMBER",
    +107            exp.DataType.Type.SMALLINT: "NUMBER",
    +108            exp.DataType.Type.INT: "NUMBER",
    +109            exp.DataType.Type.BIGINT: "NUMBER",
    +110            exp.DataType.Type.DECIMAL: "NUMBER",
    +111            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
    +112            exp.DataType.Type.VARCHAR: "VARCHAR2",
    +113            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
    +114            exp.DataType.Type.TEXT: "CLOB",
    +115            exp.DataType.Type.BINARY: "BLOB",
    +116            exp.DataType.Type.VARBINARY: "BLOB",
    +117        }
    +118
    +119        TRANSFORMS = {
    +120            **generator.Generator.TRANSFORMS,
    +121            exp.DateStrToDate: lambda self, e: self.func(
    +122                "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
    +123            ),
    +124            exp.Group: transforms.preprocess([transforms.unalias_group]),
    +125            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
    +126            exp.ILike: no_ilike_sql,
    +127            exp.Coalesce: rename_func("NVL"),
    +128            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +129            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    +130            exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
    +131            exp.Substring: rename_func("SUBSTR"),
    +132            exp.Table: lambda self, e: self.table_sql(e, sep=" "),
    +133            exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
    +134            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    +135            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +136            exp.Trim: trim_sql,
    +137            exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
    +138        }
    +139
    +140        PROPERTIES_LOCATION = {
    +141            **generator.Generator.PROPERTIES_LOCATION,
    +142            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +143        }
    +144
    +145        LIMIT_FETCH = "FETCH"
    +146
    +147        def offset_sql(self, expression: exp.Offset) -> str:
    +148            return f"{super().offset_sql(expression)} ROWS"
    +149
    +150        def column_sql(self, expression: exp.Column) -> str:
    +151            column = super().column_sql(expression)
    +152            return f"{column} (+)" if expression.args.get("join_mark") else column
    +153
    +154        def xmltable_sql(self, expression: exp.XMLTable) -> str:
    +155            this = self.sql(expression, "this")
    +156            passing = self.expressions(expression, key="passing")
    +157            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
    +158            columns = self.expressions(expression, key="columns")
    +159            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
    +160            by_ref = (
    +161                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
    +162            )
    +163            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
    +164
    +165    class Tokenizer(tokens.Tokenizer):
    +166        VAR_SINGLE_TOKENS = {"@"}
    +167
    +168        KEYWORDS = {
    +169            **tokens.Tokenizer.KEYWORDS,
    +170            "(+)": TokenType.JOIN_MARKER,
    +171            "BINARY_DOUBLE": TokenType.DOUBLE,
    +172            "BINARY_FLOAT": TokenType.FLOAT,
    +173            "COLUMNS": TokenType.COLUMN,
    +174            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    +175            "MINUS": TokenType.EXCEPT,
    +176            "NVARCHAR2": TokenType.NVARCHAR,
    +177            "SAMPLE": TokenType.TABLE_SAMPLE,
    +178            "START": TokenType.BEGIN,
    +179            "TOP": TokenType.TOP,
    +180            "VARCHAR2": TokenType.VARCHAR,
    +181        }
     
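Taken together, the TYPE_MAPPING and TRANSFORMS entries above mean that generic expressions are rewritten into Oracle spellings at generation time. A small illustrative sketch; the commented results are the expected output per these mappings, not verified here:

    import sqlglot

    # Per the exp.Coalesce transform, COALESCE is rendered as NVL in Oracle.
    print(sqlglot.transpile("SELECT COALESCE(a, b) FROM t", write="oracle")[0])
    # expected: SELECT NVL(a, b) FROM t

    # Per TYPE_MAPPING, the generic TEXT type is rendered as CLOB.
    print(sqlglot.transpile("SELECT CAST(x AS TEXT) FROM t", write="oracle")[0])
    # expected: SELECT CAST(x AS CLOB) FROM t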
    @@ -474,68 +466,60 @@
    -
     66    class Parser(parser.Parser):
    - 67        WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
    - 68
    - 69        FUNCTIONS = {
    - 70            **parser.Parser.FUNCTIONS,
    - 71            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    - 72        }
    - 73
    - 74        FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
    - 75            **parser.Parser.FUNCTION_PARSERS,
    - 76            "XMLTABLE": _parse_xml_table,
    - 77        }
    - 78
    - 79        TYPE_LITERAL_PARSERS = {
    - 80            exp.DataType.Type.DATE: lambda self, this, _: self.expression(
    - 81                exp.DateStrToDate, this=this
    - 82            )
    - 83        }
    - 84
    - 85        def _parse_column(self) -> t.Optional[exp.Expression]:
    - 86            column = super()._parse_column()
    - 87            if column:
    - 88                column.set("join_mark", self._match(TokenType.JOIN_MARKER))
    - 89            return column
    - 90
    - 91        def _parse_hint(self) -> t.Optional[exp.Expression]:
    - 92            if self._match(TokenType.HINT):
    - 93                start = self._curr
    - 94                while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
    - 95                    self._advance()
    - 96
    - 97                if not self._curr:
    - 98                    self.raise_error("Expected */ after HINT")
    - 99
    -100                end = self._tokens[self._index - 3]
    -101                return exp.Hint(expressions=[self._find_sql(start, end)])
    -102
    -103            return None
    +            
    60    class Parser(parser.Parser):
    +61        WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
    +62
    +63        FUNCTIONS = {
    +64            **parser.Parser.FUNCTIONS,
    +65            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    +66        }
    +67
    +68        FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
    +69            **parser.Parser.FUNCTION_PARSERS,
    +70            "XMLTABLE": _parse_xml_table,
    +71        }
    +72
    +73        TYPE_LITERAL_PARSERS = {
    +74            exp.DataType.Type.DATE: lambda self, this, _: self.expression(
    +75                exp.DateStrToDate, this=this
    +76            )
    +77        }
    +78
    +79        def _parse_column(self) -> t.Optional[exp.Expression]:
    +80            column = super()._parse_column()
    +81            if column:
    +82                column.set("join_mark", self._match(TokenType.JOIN_MARKER))
    +83            return column
    +84
    +85        def _parse_hint(self) -> t.Optional[exp.Hint]:
    +86            if self._match(TokenType.HINT):
    +87                start = self._curr
    +88                while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
    +89                    self._advance()
    +90
    +91                if not self._curr:
    +92                    self.raise_error("Expected */ after HINT")
    +93
    +94                end = self._tokens[self._index - 3]
    +95                return exp.Hint(expressions=[self._find_sql(start, end)])
    +96
    +97            return None
     
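The _parse_hint override above captures everything between /*+ and */ as a single exp.Hint node, and the exp.Hint transform in the Generator re-emits that text after SELECT. A sketch of the intended round trip, assuming the hint text is preserved verbatim:

    import sqlglot

    # The hint body is captured as raw SQL via _find_sql(start, end), so the
    # comma and arguments inside it are carried through unchanged (assumed).
    sql = "SELECT /*+ FULL(t) PARALLEL(t, 4) */ col FROM t"
    print(sqlglot.transpile(sql, read="oracle", write="oracle")[0])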
    - Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

    + Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

    Arguments:
    - error_level: the desired error level. Default: ErrorLevel.IMMEDIATE
    + error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
    - error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
    + error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
    - index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
    - alias_post_tablesample: If the table alias comes after tablesample. Default: False
      max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
    - null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
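These parser options are passed through the top-level parse entry points. A minimal sketch using the documented error_level and max_errors arguments, assuming parse_one() forwards its keyword options to the Parser:

    import sqlglot
    from sqlglot.errors import ErrorLevel, ParseError

    try:
        # With ErrorLevel.RAISE, messages are collected (up to max_errors)
        # and raised together as a single ParseError.
        sqlglot.parse_one(
            "SELECT * FROM (SELECT",
            error_level=ErrorLevel.RAISE,
            max_errors=5,
        )
    except ParseError as e:
        print(e.errors)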
    @@ -568,117 +552,102 @@ Default: "nulls_are_small"
    -
    105    class Generator(generator.Generator):
    -106        LOCKING_READS_SUPPORTED = True
    -107        JOIN_HINTS = False
    -108        TABLE_HINTS = False
    -109
    -110        TYPE_MAPPING = {
    -111            **generator.Generator.TYPE_MAPPING,
    -112            exp.DataType.Type.TINYINT: "NUMBER",
    -113            exp.DataType.Type.SMALLINT: "NUMBER",
    -114            exp.DataType.Type.INT: "NUMBER",
    -115            exp.DataType.Type.BIGINT: "NUMBER",
    -116            exp.DataType.Type.DECIMAL: "NUMBER",
    -117            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
    -118            exp.DataType.Type.VARCHAR: "VARCHAR2",
    -119            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
    -120            exp.DataType.Type.TEXT: "CLOB",
    -121            exp.DataType.Type.BINARY: "BLOB",
    -122            exp.DataType.Type.VARBINARY: "BLOB",
    -123        }
    -124
    -125        TRANSFORMS = {
    -126            **generator.Generator.TRANSFORMS,
    -127            exp.DateStrToDate: lambda self, e: self.func(
    -128                "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
    -129            ),
    -130            exp.Group: transforms.preprocess([transforms.unalias_group]),
    -131            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
    -132            exp.ILike: no_ilike_sql,
    -133            exp.IfNull: rename_func("NVL"),
    -134            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -135            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    -136            exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
    -137            exp.Substring: rename_func("SUBSTR"),
    -138            exp.Table: lambda self, e: self.table_sql(e, sep=" "),
    -139            exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
    -140            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    -141            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -142            exp.Trim: trim_sql,
    -143            exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
    -144        }
    -145
    -146        PROPERTIES_LOCATION = {
    -147            **generator.Generator.PROPERTIES_LOCATION,
    -148            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -149        }
    -150
    -151        LIMIT_FETCH = "FETCH"
    -152
    -153        def offset_sql(self, expression: exp.Offset) -> str:
    -154            return f"{super().offset_sql(expression)} ROWS"
    -155
    -156        def column_sql(self, expression: exp.Column) -> str:
    -157            column = super().column_sql(expression)
    -158            return f"{column} (+)" if expression.args.get("join_mark") else column
    -159
    -160        def xmltable_sql(self, expression: exp.XMLTable) -> str:
    -161            this = self.sql(expression, "this")
    -162            passing = self.expressions(expression, key="passing")
    -163            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
    -164            columns = self.expressions(expression, key="columns")
    -165            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
    -166            by_ref = (
    -167                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
    -168            )
    -169            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
    +            
     99    class Generator(generator.Generator):
    +100        LOCKING_READS_SUPPORTED = True
    +101        JOIN_HINTS = False
    +102        TABLE_HINTS = False
    +103
    +104        TYPE_MAPPING = {
    +105            **generator.Generator.TYPE_MAPPING,
    +106            exp.DataType.Type.TINYINT: "NUMBER",
    +107            exp.DataType.Type.SMALLINT: "NUMBER",
    +108            exp.DataType.Type.INT: "NUMBER",
    +109            exp.DataType.Type.BIGINT: "NUMBER",
    +110            exp.DataType.Type.DECIMAL: "NUMBER",
    +111            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
    +112            exp.DataType.Type.VARCHAR: "VARCHAR2",
    +113            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
    +114            exp.DataType.Type.TEXT: "CLOB",
    +115            exp.DataType.Type.BINARY: "BLOB",
    +116            exp.DataType.Type.VARBINARY: "BLOB",
    +117        }
    +118
    +119        TRANSFORMS = {
    +120            **generator.Generator.TRANSFORMS,
    +121            exp.DateStrToDate: lambda self, e: self.func(
    +122                "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
    +123            ),
    +124            exp.Group: transforms.preprocess([transforms.unalias_group]),
    +125            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
    +126            exp.ILike: no_ilike_sql,
    +127            exp.Coalesce: rename_func("NVL"),
    +128            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +129            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    +130            exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
    +131            exp.Substring: rename_func("SUBSTR"),
    +132            exp.Table: lambda self, e: self.table_sql(e, sep=" "),
    +133            exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
    +134            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    +135            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +136            exp.Trim: trim_sql,
    +137            exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
    +138        }
    +139
    +140        PROPERTIES_LOCATION = {
    +141            **generator.Generator.PROPERTIES_LOCATION,
    +142            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +143        }
    +144
    +145        LIMIT_FETCH = "FETCH"
    +146
    +147        def offset_sql(self, expression: exp.Offset) -> str:
    +148            return f"{super().offset_sql(expression)} ROWS"
    +149
    +150        def column_sql(self, expression: exp.Column) -> str:
    +151            column = super().column_sql(expression)
    +152            return f"{column} (+)" if expression.args.get("join_mark") else column
    +153
    +154        def xmltable_sql(self, expression: exp.XMLTable) -> str:
    +155            this = self.sql(expression, "this")
    +156            passing = self.expressions(expression, key="passing")
    +157            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
    +158            columns = self.expressions(expression, key="columns")
    +159            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
    +160            by_ref = (
    +161                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
    +162            )
    +163            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
     
    - Generator interprets the given syntax tree and produces a SQL string as an output.

    + Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
    - time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
    - time_trie (trie): a trie of the time_mapping keys
    - pretty (bool): if set to True the returned string will be formatted. Default: False.
    - quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    - quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    - identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    - identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    - bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    - bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    - hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    - hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    - byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    - byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    - raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    - raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    - identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    - normalize (bool): if set to True all identifiers will lower cased
    - string_escape (str): specifies a string escape character. Default: '.
    - identifier_escape (str): specifies an identifier escape character. Default: ".
    - pad (int): determines padding in a formatted string. Default: 2.
    - indent (int): determines the size of indentation in a formatted string. Default: 4.
    - unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    - normalize_functions (str): normalize function names, "upper", "lower", or None. Default: "upper"
    - alias_post_tablesample (bool): if the table alias comes after tablesample. Default: False
    - identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit. Default: False
    - unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
    - null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
    - max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
    + pretty: Whether or not to format the produced SQL string. Default: False.
    + identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
    + normalize: Whether or not to normalize identifiers to lowercase. Default: False.
    + pad: Determines the pad size in a formatted string. Default: 2.
    + indent: Determines the indentation size in a formatted string. Default: 2.
    + normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
    + unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
    + max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    - leading_comma (bool): if the the comma is leading or trailing in select statements
    + leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
      max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length.

@@ -701,8 +670,8 @@ Default: True
    -
    153        def offset_sql(self, expression: exp.Offset) -> str:
    -154            return f"{super().offset_sql(expression)} ROWS"
    +            
    147        def offset_sql(self, expression: exp.Offset) -> str:
    +148            return f"{super().offset_sql(expression)} ROWS"
     
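Because LIMIT_FETCH is "FETCH" and offset_sql appends " ROWS", LIMIT/OFFSET queries are rendered with Oracle's OFFSET ... ROWS / FETCH syntax. An illustrative sketch; the commented result is the expected output per these settings, not verified here:

    import sqlglot

    # LIMIT is translated into FETCH and the offset gains a ROWS suffix.
    print(sqlglot.transpile("SELECT a FROM t LIMIT 10 OFFSET 5", write="oracle")[0])
    # expected: SELECT a FROM t OFFSET 5 ROWS FETCH FIRST 10 ROWS ONLY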
    @@ -720,9 +689,9 @@ Default: True
    -
    156        def column_sql(self, expression: exp.Column) -> str:
    -157            column = super().column_sql(expression)
    -158            return f"{column} (+)" if expression.args.get("join_mark") else column
    +            
    150        def column_sql(self, expression: exp.Column) -> str:
    +151            column = super().column_sql(expression)
    +152            return f"{column} (+)" if expression.args.get("join_mark") else column
     
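The join_mark argument is set while parsing and column_sql above appends " (+)" when it is present, so Oracle's legacy outer-join marker should survive a round trip. A sketch under that assumption:

    import sqlglot

    # The (+) marker on b.id is stored as join_mark on the column node and
    # re-emitted by column_sql when generating Oracle SQL (assumed).
    sql = "SELECT a.id, b.val FROM a, b WHERE a.id = b.id (+)"
    print(sqlglot.transpile(sql, read="oracle", write="oracle")[0])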
    @@ -740,16 +709,16 @@ Default: True
    -
    160        def xmltable_sql(self, expression: exp.XMLTable) -> str:
    -161            this = self.sql(expression, "this")
    -162            passing = self.expressions(expression, key="passing")
    -163            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
    -164            columns = self.expressions(expression, key="columns")
    -165            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
    -166            by_ref = (
    -167                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
    -168            )
    -169            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
    +            
    154        def xmltable_sql(self, expression: exp.XMLTable) -> str:
    +155            this = self.sql(expression, "this")
    +156            passing = self.expressions(expression, key="passing")
    +157            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
    +158            columns = self.expressions(expression, key="columns")
    +159            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
    +160            by_ref = (
    +161                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
    +162            )
    +163            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
     
    @@ -784,6 +753,7 @@ Default: True
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +
    createable_sql
    create_sql
    clone_sql
    describe_sql
    @@ -865,10 +835,12 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_having_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -893,7 +865,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    +
    safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -944,6 +916,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -992,6 +965,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
    @@ -1008,24 +982,23 @@ Default: True
    -
    171    class Tokenizer(tokens.Tokenizer):
    -172        VAR_SINGLE_TOKENS = {"@"}
    -173
    -174        KEYWORDS = {
    -175            **tokens.Tokenizer.KEYWORDS,
    -176            "(+)": TokenType.JOIN_MARKER,
    -177            "BINARY_DOUBLE": TokenType.DOUBLE,
    -178            "BINARY_FLOAT": TokenType.FLOAT,
    -179            "COLUMNS": TokenType.COLUMN,
    -180            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    -181            "MINUS": TokenType.EXCEPT,
    -182            "NVARCHAR2": TokenType.NVARCHAR,
    -183            "RETURNING": TokenType.RETURNING,
    -184            "SAMPLE": TokenType.TABLE_SAMPLE,
    -185            "START": TokenType.BEGIN,
    -186            "TOP": TokenType.TOP,
    -187            "VARCHAR2": TokenType.VARCHAR,
    -188        }
    +            
    165    class Tokenizer(tokens.Tokenizer):
    +166        VAR_SINGLE_TOKENS = {"@"}
    +167
    +168        KEYWORDS = {
    +169            **tokens.Tokenizer.KEYWORDS,
    +170            "(+)": TokenType.JOIN_MARKER,
    +171            "BINARY_DOUBLE": TokenType.DOUBLE,
    +172            "BINARY_FLOAT": TokenType.FLOAT,
    +173            "COLUMNS": TokenType.COLUMN,
    +174            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    +175            "MINUS": TokenType.EXCEPT,
    +176            "NVARCHAR2": TokenType.NVARCHAR,
    +177            "SAMPLE": TokenType.TABLE_SAMPLE,
    +178            "START": TokenType.BEGIN,
    +179            "TOP": TokenType.TOP,
    +180            "VARCHAR2": TokenType.VARCHAR,
    +181        }
     
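The keyword table above maps Oracle-specific spellings onto generic tokens, e.g. MINUS onto the EXCEPT set operation and VARCHAR2/NVARCHAR2 onto the generic string types. A sketch of the effect when transpiling away from Oracle; the commented results are expected, not verified here:

    import sqlglot

    # MINUS tokenizes as EXCEPT, so other dialects render the generic operator.
    print(sqlglot.transpile("SELECT c FROM a MINUS SELECT c FROM b", read="oracle", write="postgres")[0])
    # expected: SELECT c FROM a EXCEPT SELECT c FROM b

    # VARCHAR2 tokenizes as the generic VARCHAR type.
    print(sqlglot.transpile("SELECT CAST(x AS VARCHAR2(10)) FROM t", read="oracle", write="postgres")[0])
    # expected: SELECT CAST(x AS VARCHAR(10)) FROM t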
    @@ -1037,6 +1010,7 @@ Default: True diff --git a/docs/sqlglot/dialects/postgres.html b/docs/sqlglot/dialects/postgres.html index b3634b0..2f6dc67 100644 --- a/docs/sqlglot/dialects/postgres.html +++ b/docs/sqlglot/dialects/postgres.html @@ -97,360 +97,366 @@
    18 rename_func, 19 str_position_sql, 20 timestamptrunc_sql, - 21 trim_sql, - 22) - 23from sqlglot.helper import seq_get - 24from sqlglot.parser import binary_range_parser - 25from sqlglot.tokens import TokenType - 26 - 27DATE_DIFF_FACTOR = { - 28 "MICROSECOND": " * 1000000", - 29 "MILLISECOND": " * 1000", - 30 "SECOND": "", - 31 "MINUTE": " / 60", - 32 "HOUR": " / 3600", - 33 "DAY": " / 86400", - 34} - 35 - 36 - 37def _date_add_sql(kind: str) -> t.Callable[[generator.Generator, exp.DateAdd | exp.DateSub], str]: - 38 def func(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str: - 39 from sqlglot.optimizer.simplify import simplify - 40 - 41 this = self.sql(expression, "this") - 42 unit = expression.args.get("unit") - 43 expression = simplify(expression.args["expression"]) - 44 - 45 if not isinstance(expression, exp.Literal): - 46 self.unsupported("Cannot add non literal") - 47 - 48 expression = expression.copy() - 49 expression.args["is_string"] = True - 50 return f"{this} {kind} {self.sql(exp.Interval(this=expression, unit=unit))}" - 51 - 52 return func + 21 timestrtotime_sql, + 22 trim_sql, + 23 ts_or_ds_to_date_sql, + 24) + 25from sqlglot.helper import seq_get + 26from sqlglot.parser import binary_range_parser + 27from sqlglot.tokens import TokenType + 28 + 29DATE_DIFF_FACTOR = { + 30 "MICROSECOND": " * 1000000", + 31 "MILLISECOND": " * 1000", + 32 "SECOND": "", + 33 "MINUTE": " / 60", + 34 "HOUR": " / 3600", + 35 "DAY": " / 86400", + 36} + 37 + 38 + 39def _date_add_sql(kind: str) -> t.Callable[[generator.Generator, exp.DateAdd | exp.DateSub], str]: + 40 def func(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str: + 41 from sqlglot.optimizer.simplify import simplify + 42 + 43 this = self.sql(expression, "this") + 44 unit = expression.args.get("unit") + 45 expression = simplify(expression.args["expression"]) + 46 + 47 if not isinstance(expression, exp.Literal): + 48 self.unsupported("Cannot add non literal") + 49 + 50 expression = expression.copy() + 51 expression.args["is_string"] = True + 52 return f"{this} {kind} {self.sql(exp.Interval(this=expression, unit=unit))}" 53 - 54 - 55def _date_diff_sql(self: generator.Generator, expression: exp.DateDiff) -> str: - 56 unit = expression.text("unit").upper() - 57 factor = DATE_DIFF_FACTOR.get(unit) - 58 - 59 end = f"CAST({expression.this} AS TIMESTAMP)" - 60 start = f"CAST({expression.expression} AS TIMESTAMP)" - 61 - 62 if factor is not None: - 63 return f"CAST(EXTRACT(epoch FROM {end} - {start}){factor} AS BIGINT)" - 64 - 65 age = f"AGE({end}, {start})" + 54 return func + 55 + 56 + 57def _date_diff_sql(self: generator.Generator, expression: exp.DateDiff) -> str: + 58 unit = expression.text("unit").upper() + 59 factor = DATE_DIFF_FACTOR.get(unit) + 60 + 61 end = f"CAST({expression.this} AS TIMESTAMP)" + 62 start = f"CAST({expression.expression} AS TIMESTAMP)" + 63 + 64 if factor is not None: + 65 return f"CAST(EXTRACT(epoch FROM {end} - {start}){factor} AS BIGINT)" 66 - 67 if unit == "WEEK": - 68 unit = f"EXTRACT(year FROM {age}) * 48 + EXTRACT(month FROM {age}) * 4 + EXTRACT(day FROM {age}) / 7" - 69 elif unit == "MONTH": - 70 unit = f"EXTRACT(year FROM {age}) * 12 + EXTRACT(month FROM {age})" - 71 elif unit == "QUARTER": - 72 unit = f"EXTRACT(year FROM {age}) * 4 + EXTRACT(month FROM {age}) / 3" - 73 elif unit == "YEAR": - 74 unit = f"EXTRACT(year FROM {age})" - 75 else: - 76 unit = age - 77 - 78 return f"CAST({unit} AS BIGINT)" + 67 age = f"AGE({end}, {start})" + 68 + 69 if unit == "WEEK": + 70 
unit = f"EXTRACT(year FROM {age}) * 48 + EXTRACT(month FROM {age}) * 4 + EXTRACT(day FROM {age}) / 7" + 71 elif unit == "MONTH": + 72 unit = f"EXTRACT(year FROM {age}) * 12 + EXTRACT(month FROM {age})" + 73 elif unit == "QUARTER": + 74 unit = f"EXTRACT(year FROM {age}) * 4 + EXTRACT(month FROM {age}) / 3" + 75 elif unit == "YEAR": + 76 unit = f"EXTRACT(year FROM {age})" + 77 else: + 78 unit = age 79 - 80 - 81def _substring_sql(self: generator.Generator, expression: exp.Substring) -> str: - 82 this = self.sql(expression, "this") - 83 start = self.sql(expression, "start") - 84 length = self.sql(expression, "length") - 85 - 86 from_part = f" FROM {start}" if start else "" - 87 for_part = f" FOR {length}" if length else "" - 88 - 89 return f"SUBSTRING({this}{from_part}{for_part})" + 80 return f"CAST({unit} AS BIGINT)" + 81 + 82 + 83def _substring_sql(self: generator.Generator, expression: exp.Substring) -> str: + 84 this = self.sql(expression, "this") + 85 start = self.sql(expression, "start") + 86 length = self.sql(expression, "length") + 87 + 88 from_part = f" FROM {start}" if start else "" + 89 for_part = f" FOR {length}" if length else "" 90 - 91 - 92def _string_agg_sql(self: generator.Generator, expression: exp.GroupConcat) -> str: - 93 expression = expression.copy() - 94 separator = expression.args.get("separator") or exp.Literal.string(",") - 95 - 96 order = "" - 97 this = expression.this - 98 if isinstance(this, exp.Order): - 99 if this.this: -100 this = this.this.pop() -101 order = self.sql(expression.this) # Order has a leading space -102 -103 return f"STRING_AGG({self.format_args(this, separator)}{order})" + 91 return f"SUBSTRING({this}{from_part}{for_part})" + 92 + 93 + 94def _string_agg_sql(self: generator.Generator, expression: exp.GroupConcat) -> str: + 95 expression = expression.copy() + 96 separator = expression.args.get("separator") or exp.Literal.string(",") + 97 + 98 order = "" + 99 this = expression.this +100 if isinstance(this, exp.Order): +101 if this.this: +102 this = this.this.pop() +103 order = self.sql(expression.this) # Order has a leading space 104 -105 -106def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str: -107 if expression.is_type("array"): -108 return f"{self.expressions(expression, flat=True)}[]" -109 return self.datatype_sql(expression) -110 -111 -112def _auto_increment_to_serial(expression: exp.Expression) -> exp.Expression: -113 auto = expression.find(exp.AutoIncrementColumnConstraint) -114 -115 if auto: -116 expression = expression.copy() -117 expression.args["constraints"].remove(auto.parent) -118 kind = expression.args["kind"] -119 -120 if kind.this == exp.DataType.Type.INT: -121 kind.replace(exp.DataType(this=exp.DataType.Type.SERIAL)) -122 elif kind.this == exp.DataType.Type.SMALLINT: -123 kind.replace(exp.DataType(this=exp.DataType.Type.SMALLSERIAL)) -124 elif kind.this == exp.DataType.Type.BIGINT: -125 kind.replace(exp.DataType(this=exp.DataType.Type.BIGSERIAL)) -126 -127 return expression +105 return f"STRING_AGG({self.format_args(this, separator)}{order})" +106 +107 +108def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str: +109 if expression.is_type("array"): +110 return f"{self.expressions(expression, flat=True)}[]" +111 return self.datatype_sql(expression) +112 +113 +114def _auto_increment_to_serial(expression: exp.Expression) -> exp.Expression: +115 auto = expression.find(exp.AutoIncrementColumnConstraint) +116 +117 if auto: +118 expression = expression.copy() +119 
expression.args["constraints"].remove(auto.parent) +120 kind = expression.args["kind"] +121 +122 if kind.this == exp.DataType.Type.INT: +123 kind.replace(exp.DataType(this=exp.DataType.Type.SERIAL)) +124 elif kind.this == exp.DataType.Type.SMALLINT: +125 kind.replace(exp.DataType(this=exp.DataType.Type.SMALLSERIAL)) +126 elif kind.this == exp.DataType.Type.BIGINT: +127 kind.replace(exp.DataType(this=exp.DataType.Type.BIGSERIAL)) 128 -129 -130def _serial_to_generated(expression: exp.Expression) -> exp.Expression: -131 kind = expression.args["kind"] -132 -133 if kind.this == exp.DataType.Type.SERIAL: -134 data_type = exp.DataType(this=exp.DataType.Type.INT) -135 elif kind.this == exp.DataType.Type.SMALLSERIAL: -136 data_type = exp.DataType(this=exp.DataType.Type.SMALLINT) -137 elif kind.this == exp.DataType.Type.BIGSERIAL: -138 data_type = exp.DataType(this=exp.DataType.Type.BIGINT) -139 else: -140 data_type = None -141 -142 if data_type: -143 expression = expression.copy() -144 expression.args["kind"].replace(data_type) -145 constraints = expression.args["constraints"] -146 generated = exp.ColumnConstraint(kind=exp.GeneratedAsIdentityColumnConstraint(this=False)) -147 notnull = exp.ColumnConstraint(kind=exp.NotNullColumnConstraint()) -148 -149 if notnull not in constraints: -150 constraints.insert(0, notnull) -151 if generated not in constraints: -152 constraints.insert(0, generated) -153 -154 return expression +129 return expression +130 +131 +132def _serial_to_generated(expression: exp.Expression) -> exp.Expression: +133 kind = expression.args["kind"] +134 +135 if kind.this == exp.DataType.Type.SERIAL: +136 data_type = exp.DataType(this=exp.DataType.Type.INT) +137 elif kind.this == exp.DataType.Type.SMALLSERIAL: +138 data_type = exp.DataType(this=exp.DataType.Type.SMALLINT) +139 elif kind.this == exp.DataType.Type.BIGSERIAL: +140 data_type = exp.DataType(this=exp.DataType.Type.BIGINT) +141 else: +142 data_type = None +143 +144 if data_type: +145 expression = expression.copy() +146 expression.args["kind"].replace(data_type) +147 constraints = expression.args["constraints"] +148 generated = exp.ColumnConstraint(kind=exp.GeneratedAsIdentityColumnConstraint(this=False)) +149 notnull = exp.ColumnConstraint(kind=exp.NotNullColumnConstraint()) +150 +151 if notnull not in constraints: +152 constraints.insert(0, notnull) +153 if generated not in constraints: +154 constraints.insert(0, generated) 155 -156 -157def _generate_series(args: t.List) -> exp.Expression: -158 # The goal is to convert step values like '1 day' or INTERVAL '1 day' into INTERVAL '1' day -159 step = seq_get(args, 2) -160 -161 if step is None: -162 # Postgres allows calls with just two arguments -- the "step" argument defaults to 1 -163 return exp.GenerateSeries.from_arg_list(args) -164 -165 if step.is_string: -166 args[2] = exp.to_interval(step.this) -167 elif isinstance(step, exp.Interval) and not step.args.get("unit"): -168 args[2] = exp.to_interval(step.this.this) -169 -170 return exp.GenerateSeries.from_arg_list(args) +156 return expression +157 +158 +159def _generate_series(args: t.List) -> exp.Expression: +160 # The goal is to convert step values like '1 day' or INTERVAL '1 day' into INTERVAL '1' day +161 step = seq_get(args, 2) +162 +163 if step is None: +164 # Postgres allows calls with just two arguments -- the "step" argument defaults to 1 +165 return exp.GenerateSeries.from_arg_list(args) +166 +167 if step.is_string: +168 args[2] = exp.to_interval(step.this) +169 elif isinstance(step, exp.Interval) and not 
step.args.get("unit"): +170 args[2] = exp.to_interval(step.this.this) 171 -172 -173def _to_timestamp(args: t.List) -> exp.Expression: -174 # TO_TIMESTAMP accepts either a single double argument or (text, text) -175 if len(args) == 1: -176 # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TABLE -177 return exp.UnixToTime.from_arg_list(args) -178 -179 # https://www.postgresql.org/docs/current/functions-formatting.html -180 return format_time_lambda(exp.StrToTime, "postgres")(args) -181 -182 -183class Postgres(Dialect): -184 null_ordering = "nulls_are_large" -185 time_format = "'YYYY-MM-DD HH24:MI:SS'" -186 time_mapping = { -187 "AM": "%p", -188 "PM": "%p", -189 "D": "%u", # 1-based day of week -190 "DD": "%d", # day of month -191 "DDD": "%j", # zero padded day of year -192 "FMDD": "%-d", # - is no leading zero for Python; same for FM in postgres -193 "FMDDD": "%-j", # day of year -194 "FMHH12": "%-I", # 9 -195 "FMHH24": "%-H", # 9 -196 "FMMI": "%-M", # Minute -197 "FMMM": "%-m", # 1 -198 "FMSS": "%-S", # Second -199 "HH12": "%I", # 09 -200 "HH24": "%H", # 09 -201 "MI": "%M", # zero padded minute -202 "MM": "%m", # 01 -203 "OF": "%z", # utc offset -204 "SS": "%S", # zero padded second -205 "TMDay": "%A", # TM is locale dependent -206 "TMDy": "%a", -207 "TMMon": "%b", # Sep -208 "TMMonth": "%B", # September -209 "TZ": "%Z", # uppercase timezone name -210 "US": "%f", # zero padded microsecond -211 "WW": "%U", # 1-based week of year -212 "YY": "%y", # 15 -213 "YYYY": "%Y", # 2015 -214 } -215 -216 class Tokenizer(tokens.Tokenizer): -217 QUOTES = ["'", "$$"] +172 return exp.GenerateSeries.from_arg_list(args) +173 +174 +175def _to_timestamp(args: t.List) -> exp.Expression: +176 # TO_TIMESTAMP accepts either a single double argument or (text, text) +177 if len(args) == 1: +178 # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TABLE +179 return exp.UnixToTime.from_arg_list(args) +180 +181 # https://www.postgresql.org/docs/current/functions-formatting.html +182 return format_time_lambda(exp.StrToTime, "postgres")(args) +183 +184 +185class Postgres(Dialect): +186 INDEX_OFFSET = 1 +187 NULL_ORDERING = "nulls_are_large" +188 TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'" +189 TIME_MAPPING = { +190 "AM": "%p", +191 "PM": "%p", +192 "D": "%u", # 1-based day of week +193 "DD": "%d", # day of month +194 "DDD": "%j", # zero padded day of year +195 "FMDD": "%-d", # - is no leading zero for Python; same for FM in postgres +196 "FMDDD": "%-j", # day of year +197 "FMHH12": "%-I", # 9 +198 "FMHH24": "%-H", # 9 +199 "FMMI": "%-M", # Minute +200 "FMMM": "%-m", # 1 +201 "FMSS": "%-S", # Second +202 "HH12": "%I", # 09 +203 "HH24": "%H", # 09 +204 "MI": "%M", # zero padded minute +205 "MM": "%m", # 01 +206 "OF": "%z", # utc offset +207 "SS": "%S", # zero padded second +208 "TMDay": "%A", # TM is locale dependent +209 "TMDy": "%a", +210 "TMMon": "%b", # Sep +211 "TMMonth": "%B", # September +212 "TZ": "%Z", # uppercase timezone name +213 "US": "%f", # zero padded microsecond +214 "WW": "%U", # 1-based week of year +215 "YY": "%y", # 15 +216 "YYYY": "%Y", # 2015 +217 } 218 -219 BIT_STRINGS = [("b'", "'"), ("B'", "'")] -220 HEX_STRINGS = [("x'", "'"), ("X'", "'")] -221 BYTE_STRINGS = [("e'", "'"), ("E'", "'")] -222 -223 KEYWORDS = { -224 **tokens.Tokenizer.KEYWORDS, -225 "~~": TokenType.LIKE, -226 "~~*": TokenType.ILIKE, -227 "~*": TokenType.IRLIKE, -228 "~": TokenType.RLIKE, -229 "@>": TokenType.AT_GT, -230 "<@": TokenType.LT_AT, -231 "BEGIN": 
TokenType.COMMAND, -232 "BEGIN TRANSACTION": TokenType.BEGIN, -233 "BIGSERIAL": TokenType.BIGSERIAL, -234 "CHARACTER VARYING": TokenType.VARCHAR, -235 "DECLARE": TokenType.COMMAND, -236 "DO": TokenType.COMMAND, -237 "HSTORE": TokenType.HSTORE, -238 "JSONB": TokenType.JSONB, -239 "REFRESH": TokenType.COMMAND, -240 "REINDEX": TokenType.COMMAND, -241 "RESET": TokenType.COMMAND, -242 "RETURNING": TokenType.RETURNING, -243 "REVOKE": TokenType.COMMAND, -244 "SERIAL": TokenType.SERIAL, -245 "SMALLSERIAL": TokenType.SMALLSERIAL, -246 "TEMP": TokenType.TEMPORARY, -247 "CSTRING": TokenType.PSEUDO_TYPE, -248 } -249 -250 SINGLE_TOKENS = { -251 **tokens.Tokenizer.SINGLE_TOKENS, -252 "$": TokenType.PARAMETER, -253 } -254 -255 VAR_SINGLE_TOKENS = {"$"} +219 class Tokenizer(tokens.Tokenizer): +220 QUOTES = ["'", "$$"] +221 +222 BIT_STRINGS = [("b'", "'"), ("B'", "'")] +223 HEX_STRINGS = [("x'", "'"), ("X'", "'")] +224 BYTE_STRINGS = [("e'", "'"), ("E'", "'")] +225 +226 KEYWORDS = { +227 **tokens.Tokenizer.KEYWORDS, +228 "~~": TokenType.LIKE, +229 "~~*": TokenType.ILIKE, +230 "~*": TokenType.IRLIKE, +231 "~": TokenType.RLIKE, +232 "@>": TokenType.AT_GT, +233 "<@": TokenType.LT_AT, +234 "BEGIN": TokenType.COMMAND, +235 "BEGIN TRANSACTION": TokenType.BEGIN, +236 "BIGSERIAL": TokenType.BIGSERIAL, +237 "CHARACTER VARYING": TokenType.VARCHAR, +238 "DECLARE": TokenType.COMMAND, +239 "DO": TokenType.COMMAND, +240 "HSTORE": TokenType.HSTORE, +241 "JSONB": TokenType.JSONB, +242 "REFRESH": TokenType.COMMAND, +243 "REINDEX": TokenType.COMMAND, +244 "RESET": TokenType.COMMAND, +245 "REVOKE": TokenType.COMMAND, +246 "SERIAL": TokenType.SERIAL, +247 "SMALLSERIAL": TokenType.SMALLSERIAL, +248 "TEMP": TokenType.TEMPORARY, +249 "CSTRING": TokenType.PSEUDO_TYPE, +250 } +251 +252 SINGLE_TOKENS = { +253 **tokens.Tokenizer.SINGLE_TOKENS, +254 "$": TokenType.PARAMETER, +255 } 256 -257 class Parser(parser.Parser): -258 STRICT_CAST = False -259 -260 FUNCTIONS = { -261 **parser.Parser.FUNCTIONS, -262 "DATE_TRUNC": lambda args: exp.TimestampTrunc( -263 this=seq_get(args, 1), unit=seq_get(args, 0) -264 ), -265 "GENERATE_SERIES": _generate_series, -266 "NOW": exp.CurrentTimestamp.from_arg_list, -267 "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"), -268 "TO_TIMESTAMP": _to_timestamp, -269 } -270 -271 FUNCTION_PARSERS = { -272 **parser.Parser.FUNCTION_PARSERS, -273 "DATE_PART": lambda self: self._parse_date_part(), -274 } -275 -276 BITWISE = { -277 **parser.Parser.BITWISE, -278 TokenType.HASH: exp.BitwiseXor, -279 } -280 -281 EXPONENT = { -282 TokenType.CARET: exp.Pow, +257 VAR_SINGLE_TOKENS = {"$"} +258 +259 class Parser(parser.Parser): +260 STRICT_CAST = False +261 CONCAT_NULL_OUTPUTS_STRING = True +262 +263 FUNCTIONS = { +264 **parser.Parser.FUNCTIONS, +265 "DATE_TRUNC": lambda args: exp.TimestampTrunc( +266 this=seq_get(args, 1), unit=seq_get(args, 0) +267 ), +268 "GENERATE_SERIES": _generate_series, +269 "NOW": exp.CurrentTimestamp.from_arg_list, +270 "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"), +271 "TO_TIMESTAMP": _to_timestamp, +272 "UNNEST": exp.Explode.from_arg_list, +273 } +274 +275 FUNCTION_PARSERS = { +276 **parser.Parser.FUNCTION_PARSERS, +277 "DATE_PART": lambda self: self._parse_date_part(), +278 } +279 +280 BITWISE = { +281 **parser.Parser.BITWISE, +282 TokenType.HASH: exp.BitwiseXor, 283 } 284 -285 RANGE_PARSERS = { -286 **parser.Parser.RANGE_PARSERS, -287 TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps), -288 TokenType.AT_GT: binary_range_parser(exp.ArrayContains), -289 TokenType.LT_AT: 
binary_range_parser(exp.ArrayContained), -290 } -291 -292 def _parse_factor(self) -> t.Optional[exp.Expression]: -293 return self._parse_tokens(self._parse_exponent, self.FACTOR) -294 -295 def _parse_exponent(self) -> t.Optional[exp.Expression]: -296 return self._parse_tokens(self._parse_unary, self.EXPONENT) -297 -298 def _parse_date_part(self) -> exp.Expression: -299 part = self._parse_type() -300 self._match(TokenType.COMMA) -301 value = self._parse_bitwise() -302 -303 if part and part.is_string: -304 part = exp.Var(this=part.name) -305 -306 return self.expression(exp.Extract, this=part, expression=value) -307 -308 class Generator(generator.Generator): -309 SINGLE_STRING_INTERVAL = True -310 LOCKING_READS_SUPPORTED = True -311 JOIN_HINTS = False -312 TABLE_HINTS = False -313 PARAMETER_TOKEN = "$" -314 -315 TYPE_MAPPING = { -316 **generator.Generator.TYPE_MAPPING, -317 exp.DataType.Type.TINYINT: "SMALLINT", -318 exp.DataType.Type.FLOAT: "REAL", -319 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", -320 exp.DataType.Type.BINARY: "BYTEA", -321 exp.DataType.Type.VARBINARY: "BYTEA", -322 exp.DataType.Type.DATETIME: "TIMESTAMP", -323 } -324 -325 TRANSFORMS = { -326 **generator.Generator.TRANSFORMS, -327 exp.BitwiseXor: lambda self, e: self.binary(e, "#"), -328 exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]), -329 exp.JSONExtract: arrow_json_extract_sql, -330 exp.JSONExtractScalar: arrow_json_extract_scalar_sql, -331 exp.JSONBExtract: lambda self, e: self.binary(e, "#>"), -332 exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"), -333 exp.JSONBContains: lambda self, e: self.binary(e, "?"), -334 exp.Pow: lambda self, e: self.binary(e, "^"), -335 exp.CurrentDate: no_paren_current_date_sql, -336 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", -337 exp.DateAdd: _date_add_sql("+"), -338 exp.DateStrToDate: datestrtodate_sql, -339 exp.DateSub: _date_add_sql("-"), -340 exp.DateDiff: _date_diff_sql, -341 exp.LogicalOr: rename_func("BOOL_OR"), -342 exp.LogicalAnd: rename_func("BOOL_AND"), -343 exp.Max: max_or_greatest, -344 exp.Min: min_or_least, -345 exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"), -346 exp.ArrayContains: lambda self, e: self.binary(e, "@>"), -347 exp.ArrayContained: lambda self, e: self.binary(e, "<@"), -348 exp.Merge: transforms.preprocess([transforms.remove_target_from_merge]), -349 exp.Pivot: no_pivot_sql, -350 exp.RegexpLike: lambda self, e: self.binary(e, "~"), -351 exp.RegexpILike: lambda self, e: self.binary(e, "~*"), -352 exp.StrPosition: str_position_sql, -353 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", -354 exp.Substring: _substring_sql, -355 exp.TimestampTrunc: timestamptrunc_sql, -356 exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)", -357 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", -358 exp.TableSample: no_tablesample_sql, -359 exp.ToChar: lambda self, e: self.function_fallback_sql(e), -360 exp.Trim: trim_sql, -361 exp.TryCast: no_trycast_sql, -362 exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})", -363 exp.DataType: _datatype_sql, -364 exp.GroupConcat: _string_agg_sql, -365 exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})" -366 if isinstance(seq_get(e.expressions, 0), exp.Select) -367 else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]", -368 } -369 -370 PROPERTIES_LOCATION = { -371 
**generator.Generator.PROPERTIES_LOCATION, -372 exp.TransientProperty: exp.Properties.Location.UNSUPPORTED, -373 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +285 EXPONENT = { +286 TokenType.CARET: exp.Pow, +287 } +288 +289 RANGE_PARSERS = { +290 **parser.Parser.RANGE_PARSERS, +291 TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps), +292 TokenType.AT_GT: binary_range_parser(exp.ArrayContains), +293 TokenType.LT_AT: binary_range_parser(exp.ArrayContained), +294 } +295 +296 def _parse_factor(self) -> t.Optional[exp.Expression]: +297 return self._parse_tokens(self._parse_exponent, self.FACTOR) +298 +299 def _parse_exponent(self) -> t.Optional[exp.Expression]: +300 return self._parse_tokens(self._parse_unary, self.EXPONENT) +301 +302 def _parse_date_part(self) -> exp.Expression: +303 part = self._parse_type() +304 self._match(TokenType.COMMA) +305 value = self._parse_bitwise() +306 +307 if part and part.is_string: +308 part = exp.var(part.name) +309 +310 return self.expression(exp.Extract, this=part, expression=value) +311 +312 class Generator(generator.Generator): +313 SINGLE_STRING_INTERVAL = True +314 LOCKING_READS_SUPPORTED = True +315 JOIN_HINTS = False +316 TABLE_HINTS = False +317 PARAMETER_TOKEN = "$" +318 +319 TYPE_MAPPING = { +320 **generator.Generator.TYPE_MAPPING, +321 exp.DataType.Type.TINYINT: "SMALLINT", +322 exp.DataType.Type.FLOAT: "REAL", +323 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", +324 exp.DataType.Type.BINARY: "BYTEA", +325 exp.DataType.Type.VARBINARY: "BYTEA", +326 exp.DataType.Type.DATETIME: "TIMESTAMP", +327 } +328 +329 TRANSFORMS = { +330 **generator.Generator.TRANSFORMS, +331 exp.BitwiseXor: lambda self, e: self.binary(e, "#"), +332 exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]), +333 exp.Explode: rename_func("UNNEST"), +334 exp.JSONExtract: arrow_json_extract_sql, +335 exp.JSONExtractScalar: arrow_json_extract_scalar_sql, +336 exp.JSONBExtract: lambda self, e: self.binary(e, "#>"), +337 exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"), +338 exp.JSONBContains: lambda self, e: self.binary(e, "?"), +339 exp.Pow: lambda self, e: self.binary(e, "^"), +340 exp.CurrentDate: no_paren_current_date_sql, +341 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", +342 exp.DateAdd: _date_add_sql("+"), +343 exp.DateStrToDate: datestrtodate_sql, +344 exp.DateSub: _date_add_sql("-"), +345 exp.DateDiff: _date_diff_sql, +346 exp.LogicalOr: rename_func("BOOL_OR"), +347 exp.LogicalAnd: rename_func("BOOL_AND"), +348 exp.Max: max_or_greatest, +349 exp.Min: min_or_least, +350 exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"), +351 exp.ArrayContains: lambda self, e: self.binary(e, "@>"), +352 exp.ArrayContained: lambda self, e: self.binary(e, "<@"), +353 exp.Merge: transforms.preprocess([transforms.remove_target_from_merge]), +354 exp.Pivot: no_pivot_sql, +355 exp.RegexpLike: lambda self, e: self.binary(e, "~"), +356 exp.RegexpILike: lambda self, e: self.binary(e, "~*"), +357 exp.StrPosition: str_position_sql, +358 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", +359 exp.Substring: _substring_sql, +360 exp.TimestampTrunc: timestamptrunc_sql, +361 exp.TimeStrToTime: timestrtotime_sql, +362 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", +363 exp.TableSample: no_tablesample_sql, +364 exp.ToChar: lambda self, e: self.function_fallback_sql(e), +365 exp.Trim: trim_sql, +366 exp.TryCast: no_trycast_sql, +367 exp.TsOrDsToDate: 
ts_or_ds_to_date_sql("postgres"), +368 exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})", +369 exp.DataType: _datatype_sql, +370 exp.GroupConcat: _string_agg_sql, +371 exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})" +372 if isinstance(seq_get(e.expressions, 0), exp.Select) +373 else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]", 374 } +375 +376 PROPERTIES_LOCATION = { +377 **generator.Generator.PROPERTIES_LOCATION, +378 exp.TransientProperty: exp.Properties.Location.UNSUPPORTED, +379 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +380 }
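The hunk above rewires several Postgres-specific behaviours: UNNEST now parses into exp.Explode (and is rendered back as UNNEST), TimeStrToTime and TsOrDsToDate get dedicated generator helpers, and _generate_series normalises plain-string steps into interval literals. What follows is a minimal sketch, not part of the patch, of how these paths can be exercised through sqlglot's public API; the SQL strings and printed results are illustrative assumptions only.

import sqlglot
from sqlglot import exp

# UNNEST is parsed into exp.Explode for the postgres dialect...
tree = sqlglot.parse_one("SELECT UNNEST(tags) FROM posts", read="postgres")
print(tree.find(exp.Explode) is not None)  # expected: True

# ...and exp.Explode is rendered back as UNNEST by the postgres generator.
print(tree.sql(dialect="postgres"))

# _generate_series turns a plain-string step such as '1 day' into an interval literal.
print(
    sqlglot.transpile(
        "SELECT GENERATE_SERIES(CAST('2020-01-01' AS DATE), CAST('2020-02-01' AS DATE), '1 day')",
        read="postgres",
        write="postgres",
    )[0]
)

# Tokenizer keywords map postgres operators onto portable expressions, e.g. ~~ becomes LIKE.
print(sqlglot.transpile("SELECT name ~~ 'a%' FROM t", read="postgres", write="postgres")[0])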
    @@ -466,198 +472,202 @@
    -
    184class Postgres(Dialect):
    -185    null_ordering = "nulls_are_large"
    -186    time_format = "'YYYY-MM-DD HH24:MI:SS'"
    -187    time_mapping = {
    -188        "AM": "%p",
    -189        "PM": "%p",
    -190        "D": "%u",  # 1-based day of week
    -191        "DD": "%d",  # day of month
    -192        "DDD": "%j",  # zero padded day of year
    -193        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
    -194        "FMDDD": "%-j",  # day of year
    -195        "FMHH12": "%-I",  # 9
    -196        "FMHH24": "%-H",  # 9
    -197        "FMMI": "%-M",  # Minute
    -198        "FMMM": "%-m",  # 1
    -199        "FMSS": "%-S",  # Second
    -200        "HH12": "%I",  # 09
    -201        "HH24": "%H",  # 09
    -202        "MI": "%M",  # zero padded minute
    -203        "MM": "%m",  # 01
    -204        "OF": "%z",  # utc offset
    -205        "SS": "%S",  # zero padded second
    -206        "TMDay": "%A",  # TM is locale dependent
    -207        "TMDy": "%a",
    -208        "TMMon": "%b",  # Sep
    -209        "TMMonth": "%B",  # September
    -210        "TZ": "%Z",  # uppercase timezone name
    -211        "US": "%f",  # zero padded microsecond
    -212        "WW": "%U",  # 1-based week of year
    -213        "YY": "%y",  # 15
    -214        "YYYY": "%Y",  # 2015
    -215    }
    -216
    -217    class Tokenizer(tokens.Tokenizer):
    -218        QUOTES = ["'", "$$"]
    +            
    186class Postgres(Dialect):
    +187    INDEX_OFFSET = 1
    +188    NULL_ORDERING = "nulls_are_large"
    +189    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    +190    TIME_MAPPING = {
    +191        "AM": "%p",
    +192        "PM": "%p",
    +193        "D": "%u",  # 1-based day of week
    +194        "DD": "%d",  # day of month
    +195        "DDD": "%j",  # zero padded day of year
    +196        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
    +197        "FMDDD": "%-j",  # day of year
    +198        "FMHH12": "%-I",  # 9
    +199        "FMHH24": "%-H",  # 9
    +200        "FMMI": "%-M",  # Minute
    +201        "FMMM": "%-m",  # 1
    +202        "FMSS": "%-S",  # Second
    +203        "HH12": "%I",  # 09
    +204        "HH24": "%H",  # 09
    +205        "MI": "%M",  # zero padded minute
    +206        "MM": "%m",  # 01
    +207        "OF": "%z",  # utc offset
    +208        "SS": "%S",  # zero padded second
    +209        "TMDay": "%A",  # TM is locale dependent
    +210        "TMDy": "%a",
    +211        "TMMon": "%b",  # Sep
    +212        "TMMonth": "%B",  # September
    +213        "TZ": "%Z",  # uppercase timezone name
    +214        "US": "%f",  # zero padded microsecond
    +215        "WW": "%U",  # 1-based week of year
    +216        "YY": "%y",  # 15
    +217        "YYYY": "%Y",  # 2015
    +218    }
     219
    -220        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
    -221        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    -222        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
    -223
    -224        KEYWORDS = {
    -225            **tokens.Tokenizer.KEYWORDS,
    -226            "~~": TokenType.LIKE,
    -227            "~~*": TokenType.ILIKE,
    -228            "~*": TokenType.IRLIKE,
    -229            "~": TokenType.RLIKE,
    -230            "@>": TokenType.AT_GT,
    -231            "<@": TokenType.LT_AT,
    -232            "BEGIN": TokenType.COMMAND,
    -233            "BEGIN TRANSACTION": TokenType.BEGIN,
    -234            "BIGSERIAL": TokenType.BIGSERIAL,
    -235            "CHARACTER VARYING": TokenType.VARCHAR,
    -236            "DECLARE": TokenType.COMMAND,
    -237            "DO": TokenType.COMMAND,
    -238            "HSTORE": TokenType.HSTORE,
    -239            "JSONB": TokenType.JSONB,
    -240            "REFRESH": TokenType.COMMAND,
    -241            "REINDEX": TokenType.COMMAND,
    -242            "RESET": TokenType.COMMAND,
    -243            "RETURNING": TokenType.RETURNING,
    -244            "REVOKE": TokenType.COMMAND,
    -245            "SERIAL": TokenType.SERIAL,
    -246            "SMALLSERIAL": TokenType.SMALLSERIAL,
    -247            "TEMP": TokenType.TEMPORARY,
    -248            "CSTRING": TokenType.PSEUDO_TYPE,
    -249        }
    -250
    -251        SINGLE_TOKENS = {
    -252            **tokens.Tokenizer.SINGLE_TOKENS,
    -253            "$": TokenType.PARAMETER,
    -254        }
    -255
    -256        VAR_SINGLE_TOKENS = {"$"}
    +220    class Tokenizer(tokens.Tokenizer):
    +221        QUOTES = ["'", "$$"]
    +222
    +223        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
    +224        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    +225        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
    +226
    +227        KEYWORDS = {
    +228            **tokens.Tokenizer.KEYWORDS,
    +229            "~~": TokenType.LIKE,
    +230            "~~*": TokenType.ILIKE,
    +231            "~*": TokenType.IRLIKE,
    +232            "~": TokenType.RLIKE,
    +233            "@>": TokenType.AT_GT,
    +234            "<@": TokenType.LT_AT,
    +235            "BEGIN": TokenType.COMMAND,
    +236            "BEGIN TRANSACTION": TokenType.BEGIN,
    +237            "BIGSERIAL": TokenType.BIGSERIAL,
    +238            "CHARACTER VARYING": TokenType.VARCHAR,
    +239            "DECLARE": TokenType.COMMAND,
    +240            "DO": TokenType.COMMAND,
    +241            "HSTORE": TokenType.HSTORE,
    +242            "JSONB": TokenType.JSONB,
    +243            "REFRESH": TokenType.COMMAND,
    +244            "REINDEX": TokenType.COMMAND,
    +245            "RESET": TokenType.COMMAND,
    +246            "REVOKE": TokenType.COMMAND,
    +247            "SERIAL": TokenType.SERIAL,
    +248            "SMALLSERIAL": TokenType.SMALLSERIAL,
    +249            "TEMP": TokenType.TEMPORARY,
    +250            "CSTRING": TokenType.PSEUDO_TYPE,
    +251        }
    +252
    +253        SINGLE_TOKENS = {
    +254            **tokens.Tokenizer.SINGLE_TOKENS,
    +255            "$": TokenType.PARAMETER,
    +256        }
     257
    -258    class Parser(parser.Parser):
    -259        STRICT_CAST = False
    -260
    -261        FUNCTIONS = {
    -262            **parser.Parser.FUNCTIONS,
    -263            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
    -264                this=seq_get(args, 1), unit=seq_get(args, 0)
    -265            ),
    -266            "GENERATE_SERIES": _generate_series,
    -267            "NOW": exp.CurrentTimestamp.from_arg_list,
    -268            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
    -269            "TO_TIMESTAMP": _to_timestamp,
    -270        }
    -271
    -272        FUNCTION_PARSERS = {
    -273            **parser.Parser.FUNCTION_PARSERS,
    -274            "DATE_PART": lambda self: self._parse_date_part(),
    -275        }
    -276
    -277        BITWISE = {
    -278            **parser.Parser.BITWISE,
    -279            TokenType.HASH: exp.BitwiseXor,
    -280        }
    -281
    -282        EXPONENT = {
    -283            TokenType.CARET: exp.Pow,
    +258        VAR_SINGLE_TOKENS = {"$"}
    +259
    +260    class Parser(parser.Parser):
    +261        STRICT_CAST = False
    +262        CONCAT_NULL_OUTPUTS_STRING = True
    +263
    +264        FUNCTIONS = {
    +265            **parser.Parser.FUNCTIONS,
    +266            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
    +267                this=seq_get(args, 1), unit=seq_get(args, 0)
    +268            ),
    +269            "GENERATE_SERIES": _generate_series,
    +270            "NOW": exp.CurrentTimestamp.from_arg_list,
    +271            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
    +272            "TO_TIMESTAMP": _to_timestamp,
    +273            "UNNEST": exp.Explode.from_arg_list,
    +274        }
    +275
    +276        FUNCTION_PARSERS = {
    +277            **parser.Parser.FUNCTION_PARSERS,
    +278            "DATE_PART": lambda self: self._parse_date_part(),
    +279        }
    +280
    +281        BITWISE = {
    +282            **parser.Parser.BITWISE,
    +283            TokenType.HASH: exp.BitwiseXor,
     284        }
     285
    -286        RANGE_PARSERS = {
    -287            **parser.Parser.RANGE_PARSERS,
    -288            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
    -289            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
    -290            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
    -291        }
    -292
    -293        def _parse_factor(self) -> t.Optional[exp.Expression]:
    -294            return self._parse_tokens(self._parse_exponent, self.FACTOR)
    -295
    -296        def _parse_exponent(self) -> t.Optional[exp.Expression]:
    -297            return self._parse_tokens(self._parse_unary, self.EXPONENT)
    -298
    -299        def _parse_date_part(self) -> exp.Expression:
    -300            part = self._parse_type()
    -301            self._match(TokenType.COMMA)
    -302            value = self._parse_bitwise()
    -303
    -304            if part and part.is_string:
    -305                part = exp.Var(this=part.name)
    -306
    -307            return self.expression(exp.Extract, this=part, expression=value)
    -308
    -309    class Generator(generator.Generator):
    -310        SINGLE_STRING_INTERVAL = True
    -311        LOCKING_READS_SUPPORTED = True
    -312        JOIN_HINTS = False
    -313        TABLE_HINTS = False
    -314        PARAMETER_TOKEN = "$"
    -315
    -316        TYPE_MAPPING = {
    -317            **generator.Generator.TYPE_MAPPING,
    -318            exp.DataType.Type.TINYINT: "SMALLINT",
    -319            exp.DataType.Type.FLOAT: "REAL",
    -320            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
    -321            exp.DataType.Type.BINARY: "BYTEA",
    -322            exp.DataType.Type.VARBINARY: "BYTEA",
    -323            exp.DataType.Type.DATETIME: "TIMESTAMP",
    -324        }
    -325
    -326        TRANSFORMS = {
    -327            **generator.Generator.TRANSFORMS,
    -328            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
    -329            exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]),
    -330            exp.JSONExtract: arrow_json_extract_sql,
    -331            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    -332            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
    -333            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
    -334            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
    -335            exp.Pow: lambda self, e: self.binary(e, "^"),
    -336            exp.CurrentDate: no_paren_current_date_sql,
    -337            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    -338            exp.DateAdd: _date_add_sql("+"),
    -339            exp.DateStrToDate: datestrtodate_sql,
    -340            exp.DateSub: _date_add_sql("-"),
    -341            exp.DateDiff: _date_diff_sql,
    -342            exp.LogicalOr: rename_func("BOOL_OR"),
    -343            exp.LogicalAnd: rename_func("BOOL_AND"),
    -344            exp.Max: max_or_greatest,
    -345            exp.Min: min_or_least,
    -346            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
    -347            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
    -348            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
    -349            exp.Merge: transforms.preprocess([transforms.remove_target_from_merge]),
    -350            exp.Pivot: no_pivot_sql,
    -351            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
    -352            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
    -353            exp.StrPosition: str_position_sql,
    -354            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    -355            exp.Substring: _substring_sql,
    -356            exp.TimestampTrunc: timestamptrunc_sql,
    -357            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
    -358            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    -359            exp.TableSample: no_tablesample_sql,
    -360            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -361            exp.Trim: trim_sql,
    -362            exp.TryCast: no_trycast_sql,
    -363            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
    -364            exp.DataType: _datatype_sql,
    -365            exp.GroupConcat: _string_agg_sql,
    -366            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
    -367            if isinstance(seq_get(e.expressions, 0), exp.Select)
    -368            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
    -369        }
    -370
    -371        PROPERTIES_LOCATION = {
    -372            **generator.Generator.PROPERTIES_LOCATION,
    -373            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    -374            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +286        EXPONENT = {
    +287            TokenType.CARET: exp.Pow,
    +288        }
    +289
    +290        RANGE_PARSERS = {
    +291            **parser.Parser.RANGE_PARSERS,
    +292            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
    +293            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
    +294            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
    +295        }
    +296
    +297        def _parse_factor(self) -> t.Optional[exp.Expression]:
    +298            return self._parse_tokens(self._parse_exponent, self.FACTOR)
    +299
    +300        def _parse_exponent(self) -> t.Optional[exp.Expression]:
    +301            return self._parse_tokens(self._parse_unary, self.EXPONENT)
    +302
    +303        def _parse_date_part(self) -> exp.Expression:
    +304            part = self._parse_type()
    +305            self._match(TokenType.COMMA)
    +306            value = self._parse_bitwise()
    +307
    +308            if part and part.is_string:
    +309                part = exp.var(part.name)
    +310
    +311            return self.expression(exp.Extract, this=part, expression=value)
    +312
    +313    class Generator(generator.Generator):
    +314        SINGLE_STRING_INTERVAL = True
    +315        LOCKING_READS_SUPPORTED = True
    +316        JOIN_HINTS = False
    +317        TABLE_HINTS = False
    +318        PARAMETER_TOKEN = "$"
    +319
    +320        TYPE_MAPPING = {
    +321            **generator.Generator.TYPE_MAPPING,
    +322            exp.DataType.Type.TINYINT: "SMALLINT",
    +323            exp.DataType.Type.FLOAT: "REAL",
    +324            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
    +325            exp.DataType.Type.BINARY: "BYTEA",
    +326            exp.DataType.Type.VARBINARY: "BYTEA",
    +327            exp.DataType.Type.DATETIME: "TIMESTAMP",
    +328        }
    +329
    +330        TRANSFORMS = {
    +331            **generator.Generator.TRANSFORMS,
    +332            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
    +333            exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]),
    +334            exp.Explode: rename_func("UNNEST"),
    +335            exp.JSONExtract: arrow_json_extract_sql,
    +336            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    +337            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
    +338            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
    +339            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
    +340            exp.Pow: lambda self, e: self.binary(e, "^"),
    +341            exp.CurrentDate: no_paren_current_date_sql,
    +342            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    +343            exp.DateAdd: _date_add_sql("+"),
    +344            exp.DateStrToDate: datestrtodate_sql,
    +345            exp.DateSub: _date_add_sql("-"),
    +346            exp.DateDiff: _date_diff_sql,
    +347            exp.LogicalOr: rename_func("BOOL_OR"),
    +348            exp.LogicalAnd: rename_func("BOOL_AND"),
    +349            exp.Max: max_or_greatest,
    +350            exp.Min: min_or_least,
    +351            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
    +352            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
    +353            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
    +354            exp.Merge: transforms.preprocess([transforms.remove_target_from_merge]),
    +355            exp.Pivot: no_pivot_sql,
    +356            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
    +357            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
    +358            exp.StrPosition: str_position_sql,
    +359            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    +360            exp.Substring: _substring_sql,
    +361            exp.TimestampTrunc: timestamptrunc_sql,
    +362            exp.TimeStrToTime: timestrtotime_sql,
    +363            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    +364            exp.TableSample: no_tablesample_sql,
    +365            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +366            exp.Trim: trim_sql,
    +367            exp.TryCast: no_trycast_sql,
    +368            exp.TsOrDsToDate: ts_or_ds_to_date_sql("postgres"),
    +369            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
    +370            exp.DataType: _datatype_sql,
    +371            exp.GroupConcat: _string_agg_sql,
    +372            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
    +373            if isinstance(seq_get(e.expressions, 0), exp.Select)
    +374            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
     375        }
    +376
    +377        PROPERTIES_LOCATION = {
    +378            **generator.Generator.PROPERTIES_LOCATION,
    +379            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    +380            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +381        }
     
    @@ -692,46 +702,45 @@
    -
    217    class Tokenizer(tokens.Tokenizer):
    -218        QUOTES = ["'", "$$"]
    -219
    -220        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
    -221        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    -222        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
    -223
    -224        KEYWORDS = {
    -225            **tokens.Tokenizer.KEYWORDS,
    -226            "~~": TokenType.LIKE,
    -227            "~~*": TokenType.ILIKE,
    -228            "~*": TokenType.IRLIKE,
    -229            "~": TokenType.RLIKE,
    -230            "@>": TokenType.AT_GT,
    -231            "<@": TokenType.LT_AT,
    -232            "BEGIN": TokenType.COMMAND,
    -233            "BEGIN TRANSACTION": TokenType.BEGIN,
    -234            "BIGSERIAL": TokenType.BIGSERIAL,
    -235            "CHARACTER VARYING": TokenType.VARCHAR,
    -236            "DECLARE": TokenType.COMMAND,
    -237            "DO": TokenType.COMMAND,
    -238            "HSTORE": TokenType.HSTORE,
    -239            "JSONB": TokenType.JSONB,
    -240            "REFRESH": TokenType.COMMAND,
    -241            "REINDEX": TokenType.COMMAND,
    -242            "RESET": TokenType.COMMAND,
    -243            "RETURNING": TokenType.RETURNING,
    -244            "REVOKE": TokenType.COMMAND,
    -245            "SERIAL": TokenType.SERIAL,
    -246            "SMALLSERIAL": TokenType.SMALLSERIAL,
    -247            "TEMP": TokenType.TEMPORARY,
    -248            "CSTRING": TokenType.PSEUDO_TYPE,
    -249        }
    -250
    -251        SINGLE_TOKENS = {
    -252            **tokens.Tokenizer.SINGLE_TOKENS,
    -253            "$": TokenType.PARAMETER,
    -254        }
    -255
    -256        VAR_SINGLE_TOKENS = {"$"}
    +            
    220    class Tokenizer(tokens.Tokenizer):
    +221        QUOTES = ["'", "$$"]
    +222
    +223        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
    +224        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    +225        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
    +226
    +227        KEYWORDS = {
    +228            **tokens.Tokenizer.KEYWORDS,
    +229            "~~": TokenType.LIKE,
    +230            "~~*": TokenType.ILIKE,
    +231            "~*": TokenType.IRLIKE,
    +232            "~": TokenType.RLIKE,
    +233            "@>": TokenType.AT_GT,
    +234            "<@": TokenType.LT_AT,
    +235            "BEGIN": TokenType.COMMAND,
    +236            "BEGIN TRANSACTION": TokenType.BEGIN,
    +237            "BIGSERIAL": TokenType.BIGSERIAL,
    +238            "CHARACTER VARYING": TokenType.VARCHAR,
    +239            "DECLARE": TokenType.COMMAND,
    +240            "DO": TokenType.COMMAND,
    +241            "HSTORE": TokenType.HSTORE,
    +242            "JSONB": TokenType.JSONB,
    +243            "REFRESH": TokenType.COMMAND,
    +244            "REINDEX": TokenType.COMMAND,
    +245            "RESET": TokenType.COMMAND,
    +246            "REVOKE": TokenType.COMMAND,
    +247            "SERIAL": TokenType.SERIAL,
    +248            "SMALLSERIAL": TokenType.SMALLSERIAL,
    +249            "TEMP": TokenType.TEMPORARY,
    +250            "CSTRING": TokenType.PSEUDO_TYPE,
    +251        }
    +252
    +253        SINGLE_TOKENS = {
    +254            **tokens.Tokenizer.SINGLE_TOKENS,
    +255            "$": TokenType.PARAMETER,
    +256        }
    +257
    +258        VAR_SINGLE_TOKENS = {"$"}
     
@@ -743,6 +752,7 @@
@@ -759,80 +769,74 @@
    -
    258    class Parser(parser.Parser):
    -259        STRICT_CAST = False
    -260
    -261        FUNCTIONS = {
    -262            **parser.Parser.FUNCTIONS,
    -263            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
    -264                this=seq_get(args, 1), unit=seq_get(args, 0)
    -265            ),
    -266            "GENERATE_SERIES": _generate_series,
    -267            "NOW": exp.CurrentTimestamp.from_arg_list,
    -268            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
    -269            "TO_TIMESTAMP": _to_timestamp,
    -270        }
    -271
    -272        FUNCTION_PARSERS = {
    -273            **parser.Parser.FUNCTION_PARSERS,
    -274            "DATE_PART": lambda self: self._parse_date_part(),
    -275        }
    -276
    -277        BITWISE = {
    -278            **parser.Parser.BITWISE,
    -279            TokenType.HASH: exp.BitwiseXor,
    -280        }
    -281
    -282        EXPONENT = {
    -283            TokenType.CARET: exp.Pow,
    +            
    260    class Parser(parser.Parser):
    +261        STRICT_CAST = False
    +262        CONCAT_NULL_OUTPUTS_STRING = True
    +263
    +264        FUNCTIONS = {
    +265            **parser.Parser.FUNCTIONS,
    +266            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
    +267                this=seq_get(args, 1), unit=seq_get(args, 0)
    +268            ),
    +269            "GENERATE_SERIES": _generate_series,
    +270            "NOW": exp.CurrentTimestamp.from_arg_list,
    +271            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
    +272            "TO_TIMESTAMP": _to_timestamp,
    +273            "UNNEST": exp.Explode.from_arg_list,
    +274        }
    +275
    +276        FUNCTION_PARSERS = {
    +277            **parser.Parser.FUNCTION_PARSERS,
    +278            "DATE_PART": lambda self: self._parse_date_part(),
    +279        }
    +280
    +281        BITWISE = {
    +282            **parser.Parser.BITWISE,
    +283            TokenType.HASH: exp.BitwiseXor,
     284        }
     285
    -286        RANGE_PARSERS = {
    -287            **parser.Parser.RANGE_PARSERS,
    -288            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
    -289            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
    -290            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
    -291        }
    -292
    -293        def _parse_factor(self) -> t.Optional[exp.Expression]:
    -294            return self._parse_tokens(self._parse_exponent, self.FACTOR)
    -295
    -296        def _parse_exponent(self) -> t.Optional[exp.Expression]:
    -297            return self._parse_tokens(self._parse_unary, self.EXPONENT)
    -298
    -299        def _parse_date_part(self) -> exp.Expression:
    -300            part = self._parse_type()
    -301            self._match(TokenType.COMMA)
    -302            value = self._parse_bitwise()
    -303
    -304            if part and part.is_string:
    -305                part = exp.Var(this=part.name)
    -306
    -307            return self.expression(exp.Extract, this=part, expression=value)
    +286        EXPONENT = {
    +287            TokenType.CARET: exp.Pow,
    +288        }
    +289
    +290        RANGE_PARSERS = {
    +291            **parser.Parser.RANGE_PARSERS,
    +292            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
    +293            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
    +294            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
    +295        }
    +296
    +297        def _parse_factor(self) -> t.Optional[exp.Expression]:
    +298            return self._parse_tokens(self._parse_exponent, self.FACTOR)
    +299
    +300        def _parse_exponent(self) -> t.Optional[exp.Expression]:
    +301            return self._parse_tokens(self._parse_unary, self.EXPONENT)
    +302
    +303        def _parse_date_part(self) -> exp.Expression:
    +304            part = self._parse_type()
    +305            self._match(TokenType.COMMA)
    +306            value = self._parse_bitwise()
    +307
    +308            if part and part.is_string:
    +309                part = exp.var(part.name)
    +310
    +311            return self.expression(exp.Extract, this=part, expression=value)
     
-Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

+Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  - • error_level: the desired error level. Default: ErrorLevel.IMMEDIATE
  + • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  - • error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
  + • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  - • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
  - • alias_post_tablesample: If the table alias comes after tablesample. Default: False
    • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
  - • null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
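As a quick illustration of the parser options documented above, they are normally supplied through sqlglot's top-level helpers, which forward keyword arguments to the dialect's Parser. A minimal sketch, not part of the patch; the malformed query is an arbitrary example:

import sqlglot
from sqlglot.errors import ErrorLevel, ParseError

try:
    # error_level and max_errors are forwarded from parse_one to the postgres Parser.
    sqlglot.parse_one(
        "SELECT FROM WHERE",           # deliberately malformed SQL
        read="postgres",
        error_level=ErrorLevel.RAISE,  # accumulate errors, then raise one ParseError
        max_errors=3,
    )
except ParseError as error:
    print(error)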
    @@ -865,119 +869,106 @@ Default: "nulls_are_small"
    -
    309    class Generator(generator.Generator):
    -310        SINGLE_STRING_INTERVAL = True
    -311        LOCKING_READS_SUPPORTED = True
    -312        JOIN_HINTS = False
    -313        TABLE_HINTS = False
    -314        PARAMETER_TOKEN = "$"
    -315
    -316        TYPE_MAPPING = {
    -317            **generator.Generator.TYPE_MAPPING,
    -318            exp.DataType.Type.TINYINT: "SMALLINT",
    -319            exp.DataType.Type.FLOAT: "REAL",
    -320            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
    -321            exp.DataType.Type.BINARY: "BYTEA",
    -322            exp.DataType.Type.VARBINARY: "BYTEA",
    -323            exp.DataType.Type.DATETIME: "TIMESTAMP",
    -324        }
    -325
    -326        TRANSFORMS = {
    -327            **generator.Generator.TRANSFORMS,
    -328            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
    -329            exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]),
    -330            exp.JSONExtract: arrow_json_extract_sql,
    -331            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    -332            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
    -333            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
    -334            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
    -335            exp.Pow: lambda self, e: self.binary(e, "^"),
    -336            exp.CurrentDate: no_paren_current_date_sql,
    -337            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    -338            exp.DateAdd: _date_add_sql("+"),
    -339            exp.DateStrToDate: datestrtodate_sql,
    -340            exp.DateSub: _date_add_sql("-"),
    -341            exp.DateDiff: _date_diff_sql,
    -342            exp.LogicalOr: rename_func("BOOL_OR"),
    -343            exp.LogicalAnd: rename_func("BOOL_AND"),
    -344            exp.Max: max_or_greatest,
    -345            exp.Min: min_or_least,
    -346            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
    -347            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
    -348            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
    -349            exp.Merge: transforms.preprocess([transforms.remove_target_from_merge]),
    -350            exp.Pivot: no_pivot_sql,
    -351            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
    -352            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
    -353            exp.StrPosition: str_position_sql,
    -354            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    -355            exp.Substring: _substring_sql,
    -356            exp.TimestampTrunc: timestamptrunc_sql,
    -357            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
    -358            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    -359            exp.TableSample: no_tablesample_sql,
    -360            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -361            exp.Trim: trim_sql,
    -362            exp.TryCast: no_trycast_sql,
    -363            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
    -364            exp.DataType: _datatype_sql,
    -365            exp.GroupConcat: _string_agg_sql,
    -366            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
    -367            if isinstance(seq_get(e.expressions, 0), exp.Select)
    -368            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
    -369        }
    -370
    -371        PROPERTIES_LOCATION = {
    -372            **generator.Generator.PROPERTIES_LOCATION,
    -373            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    -374            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +            
    313    class Generator(generator.Generator):
    +314        SINGLE_STRING_INTERVAL = True
    +315        LOCKING_READS_SUPPORTED = True
    +316        JOIN_HINTS = False
    +317        TABLE_HINTS = False
    +318        PARAMETER_TOKEN = "$"
    +319
    +320        TYPE_MAPPING = {
    +321            **generator.Generator.TYPE_MAPPING,
    +322            exp.DataType.Type.TINYINT: "SMALLINT",
    +323            exp.DataType.Type.FLOAT: "REAL",
    +324            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
    +325            exp.DataType.Type.BINARY: "BYTEA",
    +326            exp.DataType.Type.VARBINARY: "BYTEA",
    +327            exp.DataType.Type.DATETIME: "TIMESTAMP",
    +328        }
    +329
    +330        TRANSFORMS = {
    +331            **generator.Generator.TRANSFORMS,
    +332            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
    +333            exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]),
    +334            exp.Explode: rename_func("UNNEST"),
    +335            exp.JSONExtract: arrow_json_extract_sql,
    +336            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    +337            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
    +338            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
    +339            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
    +340            exp.Pow: lambda self, e: self.binary(e, "^"),
    +341            exp.CurrentDate: no_paren_current_date_sql,
    +342            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    +343            exp.DateAdd: _date_add_sql("+"),
    +344            exp.DateStrToDate: datestrtodate_sql,
    +345            exp.DateSub: _date_add_sql("-"),
    +346            exp.DateDiff: _date_diff_sql,
    +347            exp.LogicalOr: rename_func("BOOL_OR"),
    +348            exp.LogicalAnd: rename_func("BOOL_AND"),
    +349            exp.Max: max_or_greatest,
    +350            exp.Min: min_or_least,
    +351            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
    +352            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
    +353            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
    +354            exp.Merge: transforms.preprocess([transforms.remove_target_from_merge]),
    +355            exp.Pivot: no_pivot_sql,
    +356            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
    +357            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
    +358            exp.StrPosition: str_position_sql,
    +359            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    +360            exp.Substring: _substring_sql,
    +361            exp.TimestampTrunc: timestamptrunc_sql,
    +362            exp.TimeStrToTime: timestrtotime_sql,
    +363            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    +364            exp.TableSample: no_tablesample_sql,
    +365            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +366            exp.Trim: trim_sql,
    +367            exp.TryCast: no_trycast_sql,
    +368            exp.TsOrDsToDate: ts_or_ds_to_date_sql("postgres"),
    +369            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
    +370            exp.DataType: _datatype_sql,
    +371            exp.GroupConcat: _string_agg_sql,
    +372            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
    +373            if isinstance(seq_get(e.expressions, 0), exp.Select)
    +374            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
     375        }
    +376
    +377        PROPERTIES_LOCATION = {
    +378            **generator.Generator.PROPERTIES_LOCATION,
    +379            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    +380            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +381        }
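
A minimal sketch, not part of this patch, of how two of the Postgres TRANSFORMS listed above surface through sqlglot's public transpile() API (assumes a sqlglot 16.x install; the expected outputs follow from the mappings shown, they are not taken from the diff itself):

    # Sketch only: exercising the exp.Pow and exp.CurrentTimestamp mappings above.
    import sqlglot

    # exp.Pow is rendered with the "^" binary operator for Postgres.
    print(sqlglot.transpile("SELECT POW(x, 2)", read="mysql", write="postgres")[0])
    # expected: SELECT x ^ 2

    # exp.CurrentTimestamp is rendered without parentheses.
    print(sqlglot.transpile("SELECT CURRENT_TIMESTAMP()", read="mysql", write="postgres")[0])
    # expected: SELECT CURRENT_TIMESTAMP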
     
    -

    Generator interprets the given syntax tree and produces a SQL string as an output.

    +

    Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
• - time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
• - time_trie (trie): a trie of the time_mapping keys
• - pretty (bool): if set to True the returned string will be formatted. Default: False.
• - quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
• - quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
• - identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
• - identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
• - bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
• - bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
• - hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
• - hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
• - byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
• - byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
• - raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
• - raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
• - identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
• - normalize (bool): if set to True all identifiers will lower cased
• - string_escape (str): specifies a string escape character. Default: '.
• - identifier_escape (str): specifies an identifier escape character. Default: ".
• - pad (int): determines padding in a formatted string. Default: 2.
• - indent (int): determines the size of indentation in a formatted string. Default: 4.
• - unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
• - normalize_functions (str): normalize function names, "upper", "lower", or None. Default: "upper"
• - alias_post_tablesample (bool): if the table alias comes after tablesample. Default: False
• - identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit. Default: False
• - unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
• - null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
• - max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
• + pretty: Whether or not to format the produced SQL string. Default: False.
• + identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
• + normalize: Whether or not to normalize identifiers to lowercase. Default: False.
• + pad: Determines the pad size in a formatted string. Default: 2.
• + indent: Determines the indentation size in a formatted string. Default: 2.
• + normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
• + unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
• + max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
• - leading_comma (bool): if the the comma is leading or trailing in select statements
• + leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
• max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
@@ -1018,6 +1009,7 @@ Default: True
    • notnullcolumnconstraint_sql
      primarykeycolumnconstraint_sql
      uniquecolumnconstraint_sql
      +
      createable_sql
      create_sql
      clone_sql
      describe_sql
      @@ -1100,10 +1092,12 @@ Default: True
      ordered_sql
      matchrecognize_sql
      query_modifiers
      +
      offset_limit_modifiers
      after_having_modifiers
      after_limit_modifiers
      select_sql
      schema_sql
      +
      schema_columns_sql
      star_sql
      parameter_sql
      sessionparameter_sql
      @@ -1128,7 +1122,7 @@ Default: True
      nextvaluefor_sql
      extract_sql
      trim_sql
      -
      concat_sql
      +
      safeconcat_sql
      check_sql
      foreignkey_sql
      primarykey_sql
      @@ -1179,6 +1173,7 @@ Default: True
      respectnulls_sql
      intdiv_sql
      dpipe_sql
      +
      safedpipe_sql
      div_sql
      overlaps_sql
      distance_sql
      @@ -1227,6 +1222,7 @@ Default: True
      dictproperty_sql
      dictrange_sql
      dictsubproperty_sql
      +
      oncluster_sql
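
A rough usage sketch of the reworked Generator keyword arguments documented above (assumes the sqlglot 16.x API, where these options are forwarded from the top-level helpers to the Generator; the option names come from the docstring shown, not from any verified run):

    # Sketch only: Generator options passed through sqlglot.transpile().
    import sqlglot

    sql = "SELECT a, SUM(b) AS total FROM tbl GROUP BY a"
    print(
        sqlglot.transpile(
            sql,
            write="postgres",
            pretty=True,                  # format the output
            identify=True,                # always quote identifiers
            normalize_functions="lower",  # render function names in lowercase
        )[0]
    )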
diff --git a/docs/sqlglot/dialects/presto.html b/docs/sqlglot/dialects/presto.html
index 9a29740..d48e50f 100644
--- a/docs/sqlglot/dialects/presto.html
+++ b/docs/sqlglot/dialects/presto.html
@@ -57,6 +57,9 @@
• generateseries_sql
• +
• + offset_limit_modifiers
• +
  • @@ -190,7 +193,7 @@ 102 103def _ts_or_ds_to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str: 104 time_format = self.format_time(expression) -105 if time_format and time_format not in (Presto.time_format, Presto.date_format): +105 if time_format and time_format not in (Presto.TIME_FORMAT, Presto.DATE_FORMAT): 106 return f"CAST({_str_to_time_sql(self, expression)} AS DATE)" 107 return f"CAST(SUBSTR(CAST({self.sql(expression, 'this')} AS VARCHAR), 1, 10) AS DATE)" 108 @@ -207,7 +210,7 @@ 119 exp.Literal.number(1), 120 exp.Literal.number(10), 121 ), -122 Presto.date_format, +122 Presto.DATE_FORMAT, 123 ) 124 125 return self.func( @@ -233,232 +236,227 @@ 145 ) 146 if len(args) == 3: 147 return exp.ApproxQuantile( -148 this=seq_get(args, 0), -149 quantile=seq_get(args, 1), -150 accuracy=seq_get(args, 2), -151 ) -152 return exp.ApproxQuantile.from_arg_list(args) -153 -154 -155def _from_unixtime(args: t.List) -> exp.Expression: -156 if len(args) == 3: -157 return exp.UnixToTime( -158 this=seq_get(args, 0), -159 hours=seq_get(args, 1), -160 minutes=seq_get(args, 2), -161 ) -162 if len(args) == 2: -163 return exp.UnixToTime( -164 this=seq_get(args, 0), -165 zone=seq_get(args, 1), -166 ) -167 return exp.UnixToTime.from_arg_list(args) -168 -169 -170def _unnest_sequence(expression: exp.Expression) -> exp.Expression: -171 if isinstance(expression, exp.Table): -172 if isinstance(expression.this, exp.GenerateSeries): -173 unnest = exp.Unnest(expressions=[expression.this]) -174 -175 if expression.alias: -176 return exp.alias_( -177 unnest, -178 alias="_u", -179 table=[expression.alias], -180 copy=False, -181 ) -182 return unnest -183 return expression -184 -185 -186class Presto(Dialect): -187 index_offset = 1 -188 null_ordering = "nulls_are_last" -189 time_format = MySQL.time_format -190 time_mapping = MySQL.time_mapping +148 this=seq_get(args, 0), quantile=seq_get(args, 1), accuracy=seq_get(args, 2) +149 ) +150 return exp.ApproxQuantile.from_arg_list(args) +151 +152 +153def _from_unixtime(args: t.List) -> exp.Expression: +154 if len(args) == 3: +155 return exp.UnixToTime( +156 this=seq_get(args, 0), +157 hours=seq_get(args, 1), +158 minutes=seq_get(args, 2), +159 ) +160 if len(args) == 2: +161 return exp.UnixToTime(this=seq_get(args, 0), zone=seq_get(args, 1)) +162 +163 return exp.UnixToTime.from_arg_list(args) +164 +165 +166def _unnest_sequence(expression: exp.Expression) -> exp.Expression: +167 if isinstance(expression, exp.Table): +168 if isinstance(expression.this, exp.GenerateSeries): +169 unnest = exp.Unnest(expressions=[expression.this]) +170 +171 if expression.alias: +172 return exp.alias_(unnest, alias="_u", table=[expression.alias], copy=False) +173 return unnest +174 return expression +175 +176 +177class Presto(Dialect): +178 INDEX_OFFSET = 1 +179 NULL_ORDERING = "nulls_are_last" +180 TIME_FORMAT = MySQL.TIME_FORMAT +181 TIME_MAPPING = MySQL.TIME_MAPPING +182 STRICT_STRING_CONCAT = True +183 +184 class Tokenizer(tokens.Tokenizer): +185 KEYWORDS = { +186 **tokens.Tokenizer.KEYWORDS, +187 "START": TokenType.BEGIN, +188 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, +189 "ROW": TokenType.STRUCT, +190 } 191 -192 class Tokenizer(tokens.Tokenizer): -193 KEYWORDS = { -194 **tokens.Tokenizer.KEYWORDS, -195 "START": TokenType.BEGIN, -196 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, -197 "ROW": TokenType.STRUCT, -198 } -199 -200 class Parser(parser.Parser): -201 FUNCTIONS = { -202 **parser.Parser.FUNCTIONS, -203 "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list, -204 
"APPROX_PERCENTILE": _approx_percentile, -205 "CARDINALITY": exp.ArraySize.from_arg_list, -206 "CONTAINS": exp.ArrayContains.from_arg_list, -207 "DATE_ADD": lambda args: exp.DateAdd( -208 this=seq_get(args, 2), -209 expression=seq_get(args, 1), -210 unit=seq_get(args, 0), -211 ), -212 "DATE_DIFF": lambda args: exp.DateDiff( -213 this=seq_get(args, 2), -214 expression=seq_get(args, 1), -215 unit=seq_get(args, 0), -216 ), -217 "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"), -218 "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"), -219 "DATE_TRUNC": date_trunc_to_time, -220 "FROM_HEX": exp.Unhex.from_arg_list, -221 "FROM_UNIXTIME": _from_unixtime, -222 "FROM_UTF8": lambda args: exp.Decode( -223 this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8") -224 ), -225 "NOW": exp.CurrentTimestamp.from_arg_list, -226 "SEQUENCE": exp.GenerateSeries.from_arg_list, -227 "STRPOS": lambda args: exp.StrPosition( -228 this=seq_get(args, 0), -229 substr=seq_get(args, 1), -230 instance=seq_get(args, 2), -231 ), -232 "TO_UNIXTIME": exp.TimeToUnix.from_arg_list, -233 "TO_HEX": exp.Hex.from_arg_list, -234 "TO_UTF8": lambda args: exp.Encode( -235 this=seq_get(args, 0), charset=exp.Literal.string("utf-8") -236 ), -237 } -238 FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy() -239 FUNCTION_PARSERS.pop("TRIM") -240 -241 class Generator(generator.Generator): -242 INTERVAL_ALLOWS_PLURAL_FORM = False -243 JOIN_HINTS = False -244 TABLE_HINTS = False -245 STRUCT_DELIMITER = ("(", ")") -246 -247 PROPERTIES_LOCATION = { -248 **generator.Generator.PROPERTIES_LOCATION, -249 exp.LocationProperty: exp.Properties.Location.UNSUPPORTED, -250 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -251 } -252 -253 TYPE_MAPPING = { -254 **generator.Generator.TYPE_MAPPING, -255 exp.DataType.Type.INT: "INTEGER", -256 exp.DataType.Type.FLOAT: "REAL", -257 exp.DataType.Type.BINARY: "VARBINARY", -258 exp.DataType.Type.TEXT: "VARCHAR", -259 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -260 exp.DataType.Type.STRUCT: "ROW", -261 } -262 -263 TRANSFORMS = { -264 **generator.Generator.TRANSFORMS, -265 exp.ApproxDistinct: _approx_distinct_sql, -266 exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"), -267 exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]", -268 exp.ArrayConcat: rename_func("CONCAT"), -269 exp.ArrayContains: rename_func("CONTAINS"), -270 exp.ArraySize: rename_func("CARDINALITY"), -271 exp.BitwiseAnd: lambda self, e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -272 exp.BitwiseLeftShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -273 exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})", -274 exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -275 exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -276 exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -277 exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]), -278 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", -279 exp.DataType: _datatype_sql, -280 exp.DateAdd: lambda self, e: self.func( -281 "DATE_ADD", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this -282 ), -283 exp.DateDiff: lambda self, e: self.func( -284 "DATE_DIFF", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this -285 ), -286 
exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.date_format}) AS DATE)", -287 exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.dateint_format}) AS INT)", -288 exp.Decode: _decode_sql, -289 exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.dateint_format}) AS DATE)", -290 exp.Encode: _encode_sql, -291 exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'", -292 exp.Group: transforms.preprocess([transforms.unalias_group]), -293 exp.Hex: rename_func("TO_HEX"), -294 exp.If: if_sql, -295 exp.ILike: no_ilike_sql, -296 exp.Initcap: _initcap_sql, -297 exp.Lateral: _explode_to_unnest_sql, -298 exp.Left: left_to_substring_sql, -299 exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"), -300 exp.LogicalAnd: rename_func("BOOL_AND"), -301 exp.LogicalOr: rename_func("BOOL_OR"), -302 exp.Pivot: no_pivot_sql, -303 exp.Quantile: _quantile_sql, -304 exp.Right: right_to_substring_sql, -305 exp.SafeDivide: no_safe_divide_sql, -306 exp.Schema: _schema_sql, -307 exp.Select: transforms.preprocess( -308 [ -309 transforms.eliminate_qualify, -310 transforms.eliminate_distinct_on, -311 transforms.explode_to_unnest, -312 ] -313 ), -314 exp.SortArray: _no_sort_array, -315 exp.StrPosition: rename_func("STRPOS"), -316 exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)", -317 exp.StrToTime: _str_to_time_sql, -318 exp.StrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))", -319 exp.StructExtract: struct_extract_sql, -320 exp.Table: transforms.preprocess([_unnest_sequence]), -321 exp.TimestampTrunc: timestamptrunc_sql, -322 exp.TimeStrToDate: timestrtotime_sql, -323 exp.TimeStrToTime: timestrtotime_sql, -324 exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.time_format}))", -325 exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", -326 exp.TimeToUnix: rename_func("TO_UNIXTIME"), -327 exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]), -328 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)", -329 exp.TsOrDsAdd: _ts_or_ds_add_sql, -330 exp.TsOrDsToDate: _ts_or_ds_to_date_sql, -331 exp.Unhex: rename_func("FROM_HEX"), -332 exp.UnixToStr: lambda self, e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})", -333 exp.UnixToTime: rename_func("FROM_UNIXTIME"), -334 exp.UnixToTimeStr: lambda self, e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)", -335 exp.VariancePop: rename_func("VAR_POP"), -336 exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]), -337 exp.WithinGroup: transforms.preprocess( -338 [transforms.remove_within_group_for_percentiles] -339 ), -340 } -341 -342 def interval_sql(self, expression: exp.Interval) -> str: -343 unit = self.sql(expression, "unit") -344 if expression.this and unit.lower().startswith("week"): -345 return f"({expression.this.name} * INTERVAL '7' day)" -346 return super().interval_sql(expression) -347 -348 def transaction_sql(self, expression: exp.Transaction) -> str: -349 modes = expression.args.get("modes") -350 modes = f" {', '.join(modes)}" if modes else "" -351 return f"START TRANSACTION{modes}" -352 -353 def generateseries_sql(self, expression: exp.GenerateSeries) -> str: -354 start = expression.args["start"] -355 end = expression.args["end"] -356 step = expression.args.get("step") 
-357 -358 if isinstance(start, exp.Cast): -359 target_type = start.to -360 elif isinstance(end, exp.Cast): -361 target_type = end.to -362 else: -363 target_type = None -364 -365 if target_type and target_type.is_type("timestamp"): -366 to = target_type.copy() -367 -368 if target_type is start.to: -369 end = exp.Cast(this=end, to=to) -370 else: -371 start = exp.Cast(this=start, to=to) -372 -373 return self.func("SEQUENCE", start, end, step) +192 class Parser(parser.Parser): +193 FUNCTIONS = { +194 **parser.Parser.FUNCTIONS, +195 "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list, +196 "APPROX_PERCENTILE": _approx_percentile, +197 "CARDINALITY": exp.ArraySize.from_arg_list, +198 "CONTAINS": exp.ArrayContains.from_arg_list, +199 "DATE_ADD": lambda args: exp.DateAdd( +200 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) +201 ), +202 "DATE_DIFF": lambda args: exp.DateDiff( +203 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) +204 ), +205 "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"), +206 "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"), +207 "DATE_TRUNC": date_trunc_to_time, +208 "FROM_HEX": exp.Unhex.from_arg_list, +209 "FROM_UNIXTIME": _from_unixtime, +210 "FROM_UTF8": lambda args: exp.Decode( +211 this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8") +212 ), +213 "NOW": exp.CurrentTimestamp.from_arg_list, +214 "SEQUENCE": exp.GenerateSeries.from_arg_list, +215 "STRPOS": lambda args: exp.StrPosition( +216 this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2) +217 ), +218 "TO_UNIXTIME": exp.TimeToUnix.from_arg_list, +219 "TO_HEX": exp.Hex.from_arg_list, +220 "TO_UTF8": lambda args: exp.Encode( +221 this=seq_get(args, 0), charset=exp.Literal.string("utf-8") +222 ), +223 } +224 FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy() +225 FUNCTION_PARSERS.pop("TRIM") +226 +227 class Generator(generator.Generator): +228 INTERVAL_ALLOWS_PLURAL_FORM = False +229 JOIN_HINTS = False +230 TABLE_HINTS = False +231 IS_BOOL_ALLOWED = False +232 STRUCT_DELIMITER = ("(", ")") +233 +234 PROPERTIES_LOCATION = { +235 **generator.Generator.PROPERTIES_LOCATION, +236 exp.LocationProperty: exp.Properties.Location.UNSUPPORTED, +237 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +238 } +239 +240 TYPE_MAPPING = { +241 **generator.Generator.TYPE_MAPPING, +242 exp.DataType.Type.INT: "INTEGER", +243 exp.DataType.Type.FLOAT: "REAL", +244 exp.DataType.Type.BINARY: "VARBINARY", +245 exp.DataType.Type.TEXT: "VARCHAR", +246 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +247 exp.DataType.Type.STRUCT: "ROW", +248 } +249 +250 TRANSFORMS = { +251 **generator.Generator.TRANSFORMS, +252 exp.ApproxDistinct: _approx_distinct_sql, +253 exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"), +254 exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]", +255 exp.ArrayConcat: rename_func("CONCAT"), +256 exp.ArrayContains: rename_func("CONTAINS"), +257 exp.ArraySize: rename_func("CARDINALITY"), +258 exp.BitwiseAnd: lambda self, e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +259 exp.BitwiseLeftShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +260 exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})", +261 exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +262 exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 
'this')}, {self.sql(e, 'expression')})", +263 exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +264 exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]), +265 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", +266 exp.DataType: _datatype_sql, +267 exp.DateAdd: lambda self, e: self.func( +268 "DATE_ADD", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this +269 ), +270 exp.DateDiff: lambda self, e: self.func( +271 "DATE_DIFF", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this +272 ), +273 exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.DATE_FORMAT}) AS DATE)", +274 exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)", +275 exp.Decode: _decode_sql, +276 exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)", +277 exp.Encode: _encode_sql, +278 exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'", +279 exp.Group: transforms.preprocess([transforms.unalias_group]), +280 exp.Hex: rename_func("TO_HEX"), +281 exp.If: if_sql, +282 exp.ILike: no_ilike_sql, +283 exp.Initcap: _initcap_sql, +284 exp.Lateral: _explode_to_unnest_sql, +285 exp.Left: left_to_substring_sql, +286 exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"), +287 exp.LogicalAnd: rename_func("BOOL_AND"), +288 exp.LogicalOr: rename_func("BOOL_OR"), +289 exp.Pivot: no_pivot_sql, +290 exp.Quantile: _quantile_sql, +291 exp.Right: right_to_substring_sql, +292 exp.SafeDivide: no_safe_divide_sql, +293 exp.Schema: _schema_sql, +294 exp.Select: transforms.preprocess( +295 [ +296 transforms.eliminate_qualify, +297 transforms.eliminate_distinct_on, +298 transforms.explode_to_unnest, +299 ] +300 ), +301 exp.SortArray: _no_sort_array, +302 exp.StrPosition: rename_func("STRPOS"), +303 exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)", +304 exp.StrToTime: _str_to_time_sql, +305 exp.StrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))", +306 exp.StructExtract: struct_extract_sql, +307 exp.Table: transforms.preprocess([_unnest_sequence]), +308 exp.TimestampTrunc: timestamptrunc_sql, +309 exp.TimeStrToDate: timestrtotime_sql, +310 exp.TimeStrToTime: timestrtotime_sql, +311 exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.TIME_FORMAT}))", +312 exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", +313 exp.TimeToUnix: rename_func("TO_UNIXTIME"), +314 exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]), +315 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)", +316 exp.TsOrDsAdd: _ts_or_ds_add_sql, +317 exp.TsOrDsToDate: _ts_or_ds_to_date_sql, +318 exp.Unhex: rename_func("FROM_HEX"), +319 exp.UnixToStr: lambda self, e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})", +320 exp.UnixToTime: rename_func("FROM_UNIXTIME"), +321 exp.UnixToTimeStr: lambda self, e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)", +322 exp.VariancePop: rename_func("VAR_POP"), +323 exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]), +324 exp.WithinGroup: transforms.preprocess( +325 [transforms.remove_within_group_for_percentiles] +326 ), +327 } +328 +329 def interval_sql(self, expression: exp.Interval) -> str: +330 unit = 
self.sql(expression, "unit") +331 if expression.this and unit.lower().startswith("week"): +332 return f"({expression.this.name} * INTERVAL '7' day)" +333 return super().interval_sql(expression) +334 +335 def transaction_sql(self, expression: exp.Transaction) -> str: +336 modes = expression.args.get("modes") +337 modes = f" {', '.join(modes)}" if modes else "" +338 return f"START TRANSACTION{modes}" +339 +340 def generateseries_sql(self, expression: exp.GenerateSeries) -> str: +341 start = expression.args["start"] +342 end = expression.args["end"] +343 step = expression.args.get("step") +344 +345 if isinstance(start, exp.Cast): +346 target_type = start.to +347 elif isinstance(end, exp.Cast): +348 target_type = end.to +349 else: +350 target_type = None +351 +352 if target_type and target_type.is_type("timestamp"): +353 to = target_type.copy() +354 +355 if target_type is start.to: +356 end = exp.cast(end, to) +357 else: +358 start = exp.cast(start, to) +359 +360 return self.func("SEQUENCE", start, end, step) +361 +362 def offset_limit_modifiers( +363 self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit] +364 ) -> t.List[str]: +365 return [ +366 self.sql(expression, "offset"), +367 self.sql(limit), +368 ]
    @@ -474,194 +472,198 @@
    -
    187class Presto(Dialect):
    -188    index_offset = 1
    -189    null_ordering = "nulls_are_last"
    -190    time_format = MySQL.time_format
    -191    time_mapping = MySQL.time_mapping
    +            
    178class Presto(Dialect):
    +179    INDEX_OFFSET = 1
    +180    NULL_ORDERING = "nulls_are_last"
    +181    TIME_FORMAT = MySQL.TIME_FORMAT
    +182    TIME_MAPPING = MySQL.TIME_MAPPING
    +183    STRICT_STRING_CONCAT = True
    +184
    +185    class Tokenizer(tokens.Tokenizer):
    +186        KEYWORDS = {
    +187            **tokens.Tokenizer.KEYWORDS,
    +188            "START": TokenType.BEGIN,
    +189            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    +190            "ROW": TokenType.STRUCT,
    +191        }
     192
    -193    class Tokenizer(tokens.Tokenizer):
    -194        KEYWORDS = {
    -195            **tokens.Tokenizer.KEYWORDS,
    -196            "START": TokenType.BEGIN,
    -197            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    -198            "ROW": TokenType.STRUCT,
    -199        }
    -200
    -201    class Parser(parser.Parser):
    -202        FUNCTIONS = {
    -203            **parser.Parser.FUNCTIONS,
    -204            "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list,
    -205            "APPROX_PERCENTILE": _approx_percentile,
    -206            "CARDINALITY": exp.ArraySize.from_arg_list,
    -207            "CONTAINS": exp.ArrayContains.from_arg_list,
    -208            "DATE_ADD": lambda args: exp.DateAdd(
    -209                this=seq_get(args, 2),
    -210                expression=seq_get(args, 1),
    -211                unit=seq_get(args, 0),
    -212            ),
    -213            "DATE_DIFF": lambda args: exp.DateDiff(
    -214                this=seq_get(args, 2),
    -215                expression=seq_get(args, 1),
    -216                unit=seq_get(args, 0),
    -217            ),
    -218            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"),
    -219            "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"),
    -220            "DATE_TRUNC": date_trunc_to_time,
    -221            "FROM_HEX": exp.Unhex.from_arg_list,
    -222            "FROM_UNIXTIME": _from_unixtime,
    -223            "FROM_UTF8": lambda args: exp.Decode(
    -224                this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8")
    -225            ),
    -226            "NOW": exp.CurrentTimestamp.from_arg_list,
    -227            "SEQUENCE": exp.GenerateSeries.from_arg_list,
    -228            "STRPOS": lambda args: exp.StrPosition(
    -229                this=seq_get(args, 0),
    -230                substr=seq_get(args, 1),
    -231                instance=seq_get(args, 2),
    -232            ),
    -233            "TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
    -234            "TO_HEX": exp.Hex.from_arg_list,
    -235            "TO_UTF8": lambda args: exp.Encode(
    -236                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
    -237            ),
    -238        }
    -239        FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy()
    -240        FUNCTION_PARSERS.pop("TRIM")
    -241
    -242    class Generator(generator.Generator):
    -243        INTERVAL_ALLOWS_PLURAL_FORM = False
    -244        JOIN_HINTS = False
    -245        TABLE_HINTS = False
    -246        STRUCT_DELIMITER = ("(", ")")
    -247
    -248        PROPERTIES_LOCATION = {
    -249            **generator.Generator.PROPERTIES_LOCATION,
    -250            exp.LocationProperty: exp.Properties.Location.UNSUPPORTED,
    -251            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -252        }
    -253
    -254        TYPE_MAPPING = {
    -255            **generator.Generator.TYPE_MAPPING,
    -256            exp.DataType.Type.INT: "INTEGER",
    -257            exp.DataType.Type.FLOAT: "REAL",
    -258            exp.DataType.Type.BINARY: "VARBINARY",
    -259            exp.DataType.Type.TEXT: "VARCHAR",
    -260            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -261            exp.DataType.Type.STRUCT: "ROW",
    -262        }
    -263
    -264        TRANSFORMS = {
    -265            **generator.Generator.TRANSFORMS,
    -266            exp.ApproxDistinct: _approx_distinct_sql,
    -267            exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"),
    -268            exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]",
    -269            exp.ArrayConcat: rename_func("CONCAT"),
    -270            exp.ArrayContains: rename_func("CONTAINS"),
    -271            exp.ArraySize: rename_func("CARDINALITY"),
    -272            exp.BitwiseAnd: lambda self, e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -273            exp.BitwiseLeftShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -274            exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})",
    -275            exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -276            exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -277            exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -278            exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    -279            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    -280            exp.DataType: _datatype_sql,
    -281            exp.DateAdd: lambda self, e: self.func(
    -282                "DATE_ADD", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this
    -283            ),
    -284            exp.DateDiff: lambda self, e: self.func(
    -285                "DATE_DIFF", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this
    -286            ),
    -287            exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.date_format}) AS DATE)",
    -288            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.dateint_format}) AS INT)",
    -289            exp.Decode: _decode_sql,
    -290            exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.dateint_format}) AS DATE)",
    -291            exp.Encode: _encode_sql,
    -292            exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'",
    -293            exp.Group: transforms.preprocess([transforms.unalias_group]),
    -294            exp.Hex: rename_func("TO_HEX"),
    -295            exp.If: if_sql,
    -296            exp.ILike: no_ilike_sql,
    -297            exp.Initcap: _initcap_sql,
    -298            exp.Lateral: _explode_to_unnest_sql,
    -299            exp.Left: left_to_substring_sql,
    -300            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
    -301            exp.LogicalAnd: rename_func("BOOL_AND"),
    -302            exp.LogicalOr: rename_func("BOOL_OR"),
    -303            exp.Pivot: no_pivot_sql,
    -304            exp.Quantile: _quantile_sql,
    -305            exp.Right: right_to_substring_sql,
    -306            exp.SafeDivide: no_safe_divide_sql,
    -307            exp.Schema: _schema_sql,
    -308            exp.Select: transforms.preprocess(
    -309                [
    -310                    transforms.eliminate_qualify,
    -311                    transforms.eliminate_distinct_on,
    -312                    transforms.explode_to_unnest,
    -313                ]
    -314            ),
    -315            exp.SortArray: _no_sort_array,
    -316            exp.StrPosition: rename_func("STRPOS"),
    -317            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
    -318            exp.StrToTime: _str_to_time_sql,
    -319            exp.StrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))",
    -320            exp.StructExtract: struct_extract_sql,
    -321            exp.Table: transforms.preprocess([_unnest_sequence]),
    -322            exp.TimestampTrunc: timestamptrunc_sql,
    -323            exp.TimeStrToDate: timestrtotime_sql,
    -324            exp.TimeStrToTime: timestrtotime_sql,
    -325            exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.time_format}))",
    -326            exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
    -327            exp.TimeToUnix: rename_func("TO_UNIXTIME"),
    -328            exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    -329            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
    -330            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    -331            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
    -332            exp.Unhex: rename_func("FROM_HEX"),
    -333            exp.UnixToStr: lambda self, e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})",
    -334            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
    -335            exp.UnixToTimeStr: lambda self, e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)",
    -336            exp.VariancePop: rename_func("VAR_POP"),
    -337            exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]),
    -338            exp.WithinGroup: transforms.preprocess(
    -339                [transforms.remove_within_group_for_percentiles]
    -340            ),
    -341        }
    -342
    -343        def interval_sql(self, expression: exp.Interval) -> str:
    -344            unit = self.sql(expression, "unit")
    -345            if expression.this and unit.lower().startswith("week"):
    -346                return f"({expression.this.name} * INTERVAL '7' day)"
    -347            return super().interval_sql(expression)
    -348
    -349        def transaction_sql(self, expression: exp.Transaction) -> str:
    -350            modes = expression.args.get("modes")
    -351            modes = f" {', '.join(modes)}" if modes else ""
    -352            return f"START TRANSACTION{modes}"
    -353
    -354        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    -355            start = expression.args["start"]
    -356            end = expression.args["end"]
    -357            step = expression.args.get("step")
    -358
    -359            if isinstance(start, exp.Cast):
    -360                target_type = start.to
    -361            elif isinstance(end, exp.Cast):
    -362                target_type = end.to
    -363            else:
    -364                target_type = None
    -365
    -366            if target_type and target_type.is_type("timestamp"):
    -367                to = target_type.copy()
    -368
    -369                if target_type is start.to:
    -370                    end = exp.Cast(this=end, to=to)
    -371                else:
    -372                    start = exp.Cast(this=start, to=to)
    -373
    -374            return self.func("SEQUENCE", start, end, step)
    +193    class Parser(parser.Parser):
    +194        FUNCTIONS = {
    +195            **parser.Parser.FUNCTIONS,
    +196            "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list,
    +197            "APPROX_PERCENTILE": _approx_percentile,
    +198            "CARDINALITY": exp.ArraySize.from_arg_list,
    +199            "CONTAINS": exp.ArrayContains.from_arg_list,
    +200            "DATE_ADD": lambda args: exp.DateAdd(
    +201                this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
    +202            ),
    +203            "DATE_DIFF": lambda args: exp.DateDiff(
    +204                this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
    +205            ),
    +206            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"),
    +207            "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"),
    +208            "DATE_TRUNC": date_trunc_to_time,
    +209            "FROM_HEX": exp.Unhex.from_arg_list,
    +210            "FROM_UNIXTIME": _from_unixtime,
    +211            "FROM_UTF8": lambda args: exp.Decode(
    +212                this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8")
    +213            ),
    +214            "NOW": exp.CurrentTimestamp.from_arg_list,
    +215            "SEQUENCE": exp.GenerateSeries.from_arg_list,
    +216            "STRPOS": lambda args: exp.StrPosition(
    +217                this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2)
    +218            ),
    +219            "TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
    +220            "TO_HEX": exp.Hex.from_arg_list,
    +221            "TO_UTF8": lambda args: exp.Encode(
    +222                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
    +223            ),
    +224        }
    +225        FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy()
    +226        FUNCTION_PARSERS.pop("TRIM")
    +227
    +228    class Generator(generator.Generator):
    +229        INTERVAL_ALLOWS_PLURAL_FORM = False
    +230        JOIN_HINTS = False
    +231        TABLE_HINTS = False
    +232        IS_BOOL_ALLOWED = False
    +233        STRUCT_DELIMITER = ("(", ")")
    +234
    +235        PROPERTIES_LOCATION = {
    +236            **generator.Generator.PROPERTIES_LOCATION,
    +237            exp.LocationProperty: exp.Properties.Location.UNSUPPORTED,
    +238            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +239        }
    +240
    +241        TYPE_MAPPING = {
    +242            **generator.Generator.TYPE_MAPPING,
    +243            exp.DataType.Type.INT: "INTEGER",
    +244            exp.DataType.Type.FLOAT: "REAL",
    +245            exp.DataType.Type.BINARY: "VARBINARY",
    +246            exp.DataType.Type.TEXT: "VARCHAR",
    +247            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +248            exp.DataType.Type.STRUCT: "ROW",
    +249        }
    +250
    +251        TRANSFORMS = {
    +252            **generator.Generator.TRANSFORMS,
    +253            exp.ApproxDistinct: _approx_distinct_sql,
    +254            exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"),
    +255            exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]",
    +256            exp.ArrayConcat: rename_func("CONCAT"),
    +257            exp.ArrayContains: rename_func("CONTAINS"),
    +258            exp.ArraySize: rename_func("CARDINALITY"),
    +259            exp.BitwiseAnd: lambda self, e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +260            exp.BitwiseLeftShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +261            exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})",
    +262            exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +263            exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +264            exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +265            exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    +266            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    +267            exp.DataType: _datatype_sql,
    +268            exp.DateAdd: lambda self, e: self.func(
    +269                "DATE_ADD", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this
    +270            ),
    +271            exp.DateDiff: lambda self, e: self.func(
    +272                "DATE_DIFF", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this
    +273            ),
    +274            exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.DATE_FORMAT}) AS DATE)",
    +275            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)",
    +276            exp.Decode: _decode_sql,
    +277            exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)",
    +278            exp.Encode: _encode_sql,
    +279            exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'",
    +280            exp.Group: transforms.preprocess([transforms.unalias_group]),
    +281            exp.Hex: rename_func("TO_HEX"),
    +282            exp.If: if_sql,
    +283            exp.ILike: no_ilike_sql,
    +284            exp.Initcap: _initcap_sql,
    +285            exp.Lateral: _explode_to_unnest_sql,
    +286            exp.Left: left_to_substring_sql,
    +287            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
    +288            exp.LogicalAnd: rename_func("BOOL_AND"),
    +289            exp.LogicalOr: rename_func("BOOL_OR"),
    +290            exp.Pivot: no_pivot_sql,
    +291            exp.Quantile: _quantile_sql,
    +292            exp.Right: right_to_substring_sql,
    +293            exp.SafeDivide: no_safe_divide_sql,
    +294            exp.Schema: _schema_sql,
    +295            exp.Select: transforms.preprocess(
    +296                [
    +297                    transforms.eliminate_qualify,
    +298                    transforms.eliminate_distinct_on,
    +299                    transforms.explode_to_unnest,
    +300                ]
    +301            ),
    +302            exp.SortArray: _no_sort_array,
    +303            exp.StrPosition: rename_func("STRPOS"),
    +304            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
    +305            exp.StrToTime: _str_to_time_sql,
    +306            exp.StrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))",
    +307            exp.StructExtract: struct_extract_sql,
    +308            exp.Table: transforms.preprocess([_unnest_sequence]),
    +309            exp.TimestampTrunc: timestamptrunc_sql,
    +310            exp.TimeStrToDate: timestrtotime_sql,
    +311            exp.TimeStrToTime: timestrtotime_sql,
    +312            exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.TIME_FORMAT}))",
    +313            exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
    +314            exp.TimeToUnix: rename_func("TO_UNIXTIME"),
    +315            exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    +316            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
    +317            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    +318            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
    +319            exp.Unhex: rename_func("FROM_HEX"),
    +320            exp.UnixToStr: lambda self, e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})",
    +321            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
    +322            exp.UnixToTimeStr: lambda self, e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)",
    +323            exp.VariancePop: rename_func("VAR_POP"),
    +324            exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]),
    +325            exp.WithinGroup: transforms.preprocess(
    +326                [transforms.remove_within_group_for_percentiles]
    +327            ),
    +328        }
    +329
    +330        def interval_sql(self, expression: exp.Interval) -> str:
    +331            unit = self.sql(expression, "unit")
    +332            if expression.this and unit.lower().startswith("week"):
    +333                return f"({expression.this.name} * INTERVAL '7' day)"
    +334            return super().interval_sql(expression)
    +335
    +336        def transaction_sql(self, expression: exp.Transaction) -> str:
    +337            modes = expression.args.get("modes")
    +338            modes = f" {', '.join(modes)}" if modes else ""
    +339            return f"START TRANSACTION{modes}"
    +340
    +341        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    +342            start = expression.args["start"]
    +343            end = expression.args["end"]
    +344            step = expression.args.get("step")
    +345
    +346            if isinstance(start, exp.Cast):
    +347                target_type = start.to
    +348            elif isinstance(end, exp.Cast):
    +349                target_type = end.to
    +350            else:
    +351                target_type = None
    +352
    +353            if target_type and target_type.is_type("timestamp"):
    +354                to = target_type.copy()
    +355
    +356                if target_type is start.to:
    +357                    end = exp.cast(end, to)
    +358                else:
    +359                    start = exp.cast(start, to)
    +360
    +361            return self.func("SEQUENCE", start, end, step)
    +362
    +363        def offset_limit_modifiers(
    +364            self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
    +365        ) -> t.List[str]:
    +366            return [
    +367                self.sql(expression, "offset"),
    +368                self.sql(limit),
    +369            ]
     
    @@ -696,13 +698,13 @@
    -
    193    class Tokenizer(tokens.Tokenizer):
    -194        KEYWORDS = {
    -195            **tokens.Tokenizer.KEYWORDS,
    -196            "START": TokenType.BEGIN,
    -197            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    -198            "ROW": TokenType.STRUCT,
    -199        }
    +            
    185    class Tokenizer(tokens.Tokenizer):
    +186        KEYWORDS = {
    +187            **tokens.Tokenizer.KEYWORDS,
    +188            "START": TokenType.BEGIN,
    +189            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    +190            "ROW": TokenType.STRUCT,
    +191        }
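
Because the Tokenizer above maps START to the BEGIN token type, Presto's transaction syntax parses and is rendered back through the transaction_sql method shown earlier. A hedged sketch (assuming sqlglot 16.x):

    # Sketch only: "START" tokenizes as BEGIN and round-trips as START TRANSACTION.
    import sqlglot

    print(sqlglot.transpile("START TRANSACTION", read="presto", write="presto")[0])
    # expected: START TRANSACTION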
     
@@ -714,6 +716,7 @@
@@ -730,70 +733,56 @@
    -
    201    class Parser(parser.Parser):
    -202        FUNCTIONS = {
    -203            **parser.Parser.FUNCTIONS,
    -204            "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list,
    -205            "APPROX_PERCENTILE": _approx_percentile,
    -206            "CARDINALITY": exp.ArraySize.from_arg_list,
    -207            "CONTAINS": exp.ArrayContains.from_arg_list,
    -208            "DATE_ADD": lambda args: exp.DateAdd(
    -209                this=seq_get(args, 2),
    -210                expression=seq_get(args, 1),
    -211                unit=seq_get(args, 0),
    -212            ),
    -213            "DATE_DIFF": lambda args: exp.DateDiff(
    -214                this=seq_get(args, 2),
    -215                expression=seq_get(args, 1),
    -216                unit=seq_get(args, 0),
    -217            ),
    -218            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"),
    -219            "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"),
    -220            "DATE_TRUNC": date_trunc_to_time,
    -221            "FROM_HEX": exp.Unhex.from_arg_list,
    -222            "FROM_UNIXTIME": _from_unixtime,
    -223            "FROM_UTF8": lambda args: exp.Decode(
    -224                this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8")
    -225            ),
    -226            "NOW": exp.CurrentTimestamp.from_arg_list,
    -227            "SEQUENCE": exp.GenerateSeries.from_arg_list,
    -228            "STRPOS": lambda args: exp.StrPosition(
    -229                this=seq_get(args, 0),
    -230                substr=seq_get(args, 1),
    -231                instance=seq_get(args, 2),
    -232            ),
    -233            "TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
    -234            "TO_HEX": exp.Hex.from_arg_list,
    -235            "TO_UTF8": lambda args: exp.Encode(
    -236                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
    -237            ),
    -238        }
    -239        FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy()
    -240        FUNCTION_PARSERS.pop("TRIM")
    +            
    193    class Parser(parser.Parser):
    +194        FUNCTIONS = {
    +195            **parser.Parser.FUNCTIONS,
    +196            "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list,
    +197            "APPROX_PERCENTILE": _approx_percentile,
    +198            "CARDINALITY": exp.ArraySize.from_arg_list,
    +199            "CONTAINS": exp.ArrayContains.from_arg_list,
    +200            "DATE_ADD": lambda args: exp.DateAdd(
    +201                this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
    +202            ),
    +203            "DATE_DIFF": lambda args: exp.DateDiff(
    +204                this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
    +205            ),
    +206            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"),
    +207            "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"),
    +208            "DATE_TRUNC": date_trunc_to_time,
    +209            "FROM_HEX": exp.Unhex.from_arg_list,
    +210            "FROM_UNIXTIME": _from_unixtime,
    +211            "FROM_UTF8": lambda args: exp.Decode(
    +212                this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8")
    +213            ),
    +214            "NOW": exp.CurrentTimestamp.from_arg_list,
    +215            "SEQUENCE": exp.GenerateSeries.from_arg_list,
    +216            "STRPOS": lambda args: exp.StrPosition(
    +217                this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2)
    +218            ),
    +219            "TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
    +220            "TO_HEX": exp.Hex.from_arg_list,
    +221            "TO_UTF8": lambda args: exp.Encode(
    +222                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
    +223            ),
    +224        }
    +225        FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy()
    +226        FUNCTION_PARSERS.pop("TRIM")
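
The FUNCTIONS overrides above rebuild Presto calls such as DATE_ADD(unit, value, date) into the dialect-agnostic expression tree with this/expression/unit arguments. A small round-trip sketch (assumes sqlglot 16.x; not part of the patch):

    # Sketch only: Presto's DATE_ADD(unit, value, date) parses into exp.DateAdd
    # and renders back with the unit as a string literal.
    import sqlglot

    expr = sqlglot.parse_one("SELECT DATE_ADD('day', 1, x)", read="presto")
    node = expr.find(sqlglot.exp.DateAdd)
    print(node.args.get("unit"))         # the unit argument ('day' literal)
    print(expr.sql(dialect="presto"))    # expected: SELECT DATE_ADD('day', 1, x)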
     
    -

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

    +

    Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

    Arguments:
• - error_level: the desired error level. Default: ErrorLevel.IMMEDIATE
• + error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
• - error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
• + error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
• - index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
• - alias_post_tablesample: If the table alias comes after tablesample. Default: False
• max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
• - null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
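
For reference, a loose sketch of how the parser options documented above are supplied in practice; they are forwarded by the top-level parsing helpers (names follow the sqlglot 16.x API, and the example is illustrative rather than taken from the patch):

    # Sketch only: parser options such as error_level are forwarded by parse_one().
    import sqlglot
    from sqlglot.errors import ErrorLevel

    tree = sqlglot.parse_one("SELECT 1 AS x", read="presto", error_level=ErrorLevel.RAISE)
    print(type(tree).__name__)           # Select
    print(tree.sql(dialect="presto"))    # SELECT 1 AS x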
    @@ -826,185 +815,179 @@ Default: "nulls_are_small"
    -
    242    class Generator(generator.Generator):
    -243        INTERVAL_ALLOWS_PLURAL_FORM = False
    -244        JOIN_HINTS = False
    -245        TABLE_HINTS = False
    -246        STRUCT_DELIMITER = ("(", ")")
    -247
    -248        PROPERTIES_LOCATION = {
    -249            **generator.Generator.PROPERTIES_LOCATION,
    -250            exp.LocationProperty: exp.Properties.Location.UNSUPPORTED,
    -251            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -252        }
    -253
    -254        TYPE_MAPPING = {
    -255            **generator.Generator.TYPE_MAPPING,
    -256            exp.DataType.Type.INT: "INTEGER",
    -257            exp.DataType.Type.FLOAT: "REAL",
    -258            exp.DataType.Type.BINARY: "VARBINARY",
    -259            exp.DataType.Type.TEXT: "VARCHAR",
    -260            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -261            exp.DataType.Type.STRUCT: "ROW",
    -262        }
    -263
    -264        TRANSFORMS = {
    -265            **generator.Generator.TRANSFORMS,
    -266            exp.ApproxDistinct: _approx_distinct_sql,
    -267            exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"),
    -268            exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]",
    -269            exp.ArrayConcat: rename_func("CONCAT"),
    -270            exp.ArrayContains: rename_func("CONTAINS"),
    -271            exp.ArraySize: rename_func("CARDINALITY"),
    -272            exp.BitwiseAnd: lambda self, e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -273            exp.BitwiseLeftShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -274            exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})",
    -275            exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -276            exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -277            exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -278            exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    -279            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    -280            exp.DataType: _datatype_sql,
    -281            exp.DateAdd: lambda self, e: self.func(
    -282                "DATE_ADD", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this
    -283            ),
    -284            exp.DateDiff: lambda self, e: self.func(
    -285                "DATE_DIFF", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this
    -286            ),
    -287            exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.date_format}) AS DATE)",
    -288            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.dateint_format}) AS INT)",
    -289            exp.Decode: _decode_sql,
    -290            exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.dateint_format}) AS DATE)",
    -291            exp.Encode: _encode_sql,
    -292            exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'",
    -293            exp.Group: transforms.preprocess([transforms.unalias_group]),
    -294            exp.Hex: rename_func("TO_HEX"),
    -295            exp.If: if_sql,
    -296            exp.ILike: no_ilike_sql,
    -297            exp.Initcap: _initcap_sql,
    -298            exp.Lateral: _explode_to_unnest_sql,
    -299            exp.Left: left_to_substring_sql,
    -300            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
    -301            exp.LogicalAnd: rename_func("BOOL_AND"),
    -302            exp.LogicalOr: rename_func("BOOL_OR"),
    -303            exp.Pivot: no_pivot_sql,
    -304            exp.Quantile: _quantile_sql,
    -305            exp.Right: right_to_substring_sql,
    -306            exp.SafeDivide: no_safe_divide_sql,
    -307            exp.Schema: _schema_sql,
    -308            exp.Select: transforms.preprocess(
    -309                [
    -310                    transforms.eliminate_qualify,
    -311                    transforms.eliminate_distinct_on,
    -312                    transforms.explode_to_unnest,
    -313                ]
    -314            ),
    -315            exp.SortArray: _no_sort_array,
    -316            exp.StrPosition: rename_func("STRPOS"),
    -317            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
    -318            exp.StrToTime: _str_to_time_sql,
    -319            exp.StrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))",
    -320            exp.StructExtract: struct_extract_sql,
    -321            exp.Table: transforms.preprocess([_unnest_sequence]),
    -322            exp.TimestampTrunc: timestamptrunc_sql,
    -323            exp.TimeStrToDate: timestrtotime_sql,
    -324            exp.TimeStrToTime: timestrtotime_sql,
    -325            exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.time_format}))",
    -326            exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
    -327            exp.TimeToUnix: rename_func("TO_UNIXTIME"),
    -328            exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    -329            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
    -330            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    -331            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
    -332            exp.Unhex: rename_func("FROM_HEX"),
    -333            exp.UnixToStr: lambda self, e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})",
    -334            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
    -335            exp.UnixToTimeStr: lambda self, e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)",
    -336            exp.VariancePop: rename_func("VAR_POP"),
    -337            exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]),
    -338            exp.WithinGroup: transforms.preprocess(
    -339                [transforms.remove_within_group_for_percentiles]
    -340            ),
    -341        }
    -342
    -343        def interval_sql(self, expression: exp.Interval) -> str:
    -344            unit = self.sql(expression, "unit")
    -345            if expression.this and unit.lower().startswith("week"):
    -346                return f"({expression.this.name} * INTERVAL '7' day)"
    -347            return super().interval_sql(expression)
    -348
    -349        def transaction_sql(self, expression: exp.Transaction) -> str:
    -350            modes = expression.args.get("modes")
    -351            modes = f" {', '.join(modes)}" if modes else ""
    -352            return f"START TRANSACTION{modes}"
    -353
    -354        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    -355            start = expression.args["start"]
    -356            end = expression.args["end"]
    -357            step = expression.args.get("step")
    -358
    -359            if isinstance(start, exp.Cast):
    -360                target_type = start.to
    -361            elif isinstance(end, exp.Cast):
    -362                target_type = end.to
    -363            else:
    -364                target_type = None
    -365
    -366            if target_type and target_type.is_type("timestamp"):
    -367                to = target_type.copy()
    -368
    -369                if target_type is start.to:
    -370                    end = exp.Cast(this=end, to=to)
    -371                else:
    -372                    start = exp.Cast(this=start, to=to)
    -373
    -374            return self.func("SEQUENCE", start, end, step)
    +            
    228    class Generator(generator.Generator):
    +229        INTERVAL_ALLOWS_PLURAL_FORM = False
    +230        JOIN_HINTS = False
    +231        TABLE_HINTS = False
    +232        IS_BOOL_ALLOWED = False
    +233        STRUCT_DELIMITER = ("(", ")")
    +234
    +235        PROPERTIES_LOCATION = {
    +236            **generator.Generator.PROPERTIES_LOCATION,
    +237            exp.LocationProperty: exp.Properties.Location.UNSUPPORTED,
    +238            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +239        }
    +240
    +241        TYPE_MAPPING = {
    +242            **generator.Generator.TYPE_MAPPING,
    +243            exp.DataType.Type.INT: "INTEGER",
    +244            exp.DataType.Type.FLOAT: "REAL",
    +245            exp.DataType.Type.BINARY: "VARBINARY",
    +246            exp.DataType.Type.TEXT: "VARCHAR",
    +247            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +248            exp.DataType.Type.STRUCT: "ROW",
    +249        }
    +250
    +251        TRANSFORMS = {
    +252            **generator.Generator.TRANSFORMS,
    +253            exp.ApproxDistinct: _approx_distinct_sql,
    +254            exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"),
    +255            exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]",
    +256            exp.ArrayConcat: rename_func("CONCAT"),
    +257            exp.ArrayContains: rename_func("CONTAINS"),
    +258            exp.ArraySize: rename_func("CARDINALITY"),
    +259            exp.BitwiseAnd: lambda self, e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +260            exp.BitwiseLeftShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +261            exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})",
    +262            exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +263            exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +264            exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +265            exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    +266            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    +267            exp.DataType: _datatype_sql,
    +268            exp.DateAdd: lambda self, e: self.func(
    +269                "DATE_ADD", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this
    +270            ),
    +271            exp.DateDiff: lambda self, e: self.func(
    +272                "DATE_DIFF", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this
    +273            ),
    +274            exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.DATE_FORMAT}) AS DATE)",
    +275            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)",
    +276            exp.Decode: _decode_sql,
    +277            exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)",
    +278            exp.Encode: _encode_sql,
    +279            exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'",
    +280            exp.Group: transforms.preprocess([transforms.unalias_group]),
    +281            exp.Hex: rename_func("TO_HEX"),
    +282            exp.If: if_sql,
    +283            exp.ILike: no_ilike_sql,
    +284            exp.Initcap: _initcap_sql,
    +285            exp.Lateral: _explode_to_unnest_sql,
    +286            exp.Left: left_to_substring_sql,
    +287            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
    +288            exp.LogicalAnd: rename_func("BOOL_AND"),
    +289            exp.LogicalOr: rename_func("BOOL_OR"),
    +290            exp.Pivot: no_pivot_sql,
    +291            exp.Quantile: _quantile_sql,
    +292            exp.Right: right_to_substring_sql,
    +293            exp.SafeDivide: no_safe_divide_sql,
    +294            exp.Schema: _schema_sql,
    +295            exp.Select: transforms.preprocess(
    +296                [
    +297                    transforms.eliminate_qualify,
    +298                    transforms.eliminate_distinct_on,
    +299                    transforms.explode_to_unnest,
    +300                ]
    +301            ),
    +302            exp.SortArray: _no_sort_array,
    +303            exp.StrPosition: rename_func("STRPOS"),
    +304            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
    +305            exp.StrToTime: _str_to_time_sql,
    +306            exp.StrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))",
    +307            exp.StructExtract: struct_extract_sql,
    +308            exp.Table: transforms.preprocess([_unnest_sequence]),
    +309            exp.TimestampTrunc: timestamptrunc_sql,
    +310            exp.TimeStrToDate: timestrtotime_sql,
    +311            exp.TimeStrToTime: timestrtotime_sql,
    +312            exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.TIME_FORMAT}))",
    +313            exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
    +314            exp.TimeToUnix: rename_func("TO_UNIXTIME"),
    +315            exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    +316            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
    +317            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    +318            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
    +319            exp.Unhex: rename_func("FROM_HEX"),
    +320            exp.UnixToStr: lambda self, e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})",
    +321            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
    +322            exp.UnixToTimeStr: lambda self, e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)",
    +323            exp.VariancePop: rename_func("VAR_POP"),
    +324            exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]),
    +325            exp.WithinGroup: transforms.preprocess(
    +326                [transforms.remove_within_group_for_percentiles]
    +327            ),
    +328        }
    +329
    +330        def interval_sql(self, expression: exp.Interval) -> str:
    +331            unit = self.sql(expression, "unit")
    +332            if expression.this and unit.lower().startswith("week"):
    +333                return f"({expression.this.name} * INTERVAL '7' day)"
    +334            return super().interval_sql(expression)
    +335
    +336        def transaction_sql(self, expression: exp.Transaction) -> str:
    +337            modes = expression.args.get("modes")
    +338            modes = f" {', '.join(modes)}" if modes else ""
    +339            return f"START TRANSACTION{modes}"
    +340
    +341        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    +342            start = expression.args["start"]
    +343            end = expression.args["end"]
    +344            step = expression.args.get("step")
    +345
    +346            if isinstance(start, exp.Cast):
    +347                target_type = start.to
    +348            elif isinstance(end, exp.Cast):
    +349                target_type = end.to
    +350            else:
    +351                target_type = None
    +352
    +353            if target_type and target_type.is_type("timestamp"):
    +354                to = target_type.copy()
    +355
    +356                if target_type is start.to:
    +357                    end = exp.cast(end, to)
    +358                else:
    +359                    start = exp.cast(start, to)
    +360
    +361            return self.func("SEQUENCE", start, end, step)
    +362
    +363        def offset_limit_modifiers(
    +364            self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
    +365        ) -> t.List[str]:
    +366            return [
    +367                self.sql(expression, "offset"),
    +368                self.sql(limit),
    +369            ]
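
A hedged illustration (not part of this patch) of a few of the TRANSFORMS above; the table and column names are made up, and the exact output may differ slightly between sqlglot versions:

    import sqlglot

    # ApproxDistinct is renamed to APPROX_DISTINCT when targeting Presto.
    print(sqlglot.transpile("SELECT APPROX_COUNT_DISTINCT(x) FROM t", read="duckdb", write="presto")[0])
    # Expected (approximately): SELECT APPROX_DISTINCT(x) FROM t

    # interval_sql above rewrites week intervals, which Presto does not support directly.
    print(sqlglot.transpile("SELECT INTERVAL '2' week", read="duckdb", write="presto")[0])
    # Expected (approximately): SELECT (2 * INTERVAL '7' day)
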
     
    -

    Generator interprets the given syntax tree and produces a SQL string as an output.

    +

    Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
      -
    • time_mapping (dict): the dictionary of custom time mappings in which the key -represents a python time format and the output the target time format
    • -
    • time_trie (trie): a trie of the time_mapping keys
    • -
    • pretty (bool): if set to True the returned string will be formatted. Default: False.
    • -
    • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    • -
    • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    • -
    • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    • -
    • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    • -
    • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    • -
    • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    • -
    • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    • -
    • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    • -
    • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    • -
    • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    • -
    • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    • -
    • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    • -
    • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    • -
    • normalize (bool): if set to True all identifiers will lower cased
    • -
    • string_escape (str): specifies a string escape character. Default: '.
    • -
    • identifier_escape (str): specifies an identifier escape character. Default: ".
    • -
    • pad (int): determines padding in a formatted string. Default: 2.
    • -
    • indent (int): determines the size of indentation in a formatted string. Default: 4.
    • -
    • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    • -
    • normalize_functions (str): normalize function names, "upper", "lower", or None -Default: "upper"
    • -
    • alias_post_tablesample (bool): if the table alias comes after tablesample -Default: False
    • -
    • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit -Default: False
    • -
    • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters -unsupported expressions. Default ErrorLevel.WARN.
    • -
    • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
    • -
    • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +
    • pretty: Whether or not to format the produced SQL string. +Default: False.
    • +
    • identify: Determines when an identifier should be quoted. Possible values are: +False (default): Never quote, except in cases where it's mandatory by the dialect. +True or 'always': Always quote. +'safe': Only quote identifiers that are case insensitive.
    • +
    • normalize: Whether or not to normalize identifiers to lowercase. +Default: False.
    • +
    • pad: Determines the pad size in a formatted string. +Default: 2.
    • +
    • indent: Determines the indentation size in a formatted string. +Default: 2.
    • +
    • normalize_functions: Whether or not to normalize all function names. Possible values are: +"upper" or True (default): Convert names to uppercase. +"lower": Convert names to lowercase. +False: Disables function name normalization.
    • +
    • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. +Default ErrorLevel.WARN.
    • +
    • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    • -
    • leading_comma (bool): if the the comma is leading or trailing in select statements +
    • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. +This is only relevant when generating in pretty mode. Default: False
    • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
    @@ -1027,11 +1010,11 @@ Default: True
    -
    343        def interval_sql(self, expression: exp.Interval) -> str:
    -344            unit = self.sql(expression, "unit")
    -345            if expression.this and unit.lower().startswith("week"):
    -346                return f"({expression.this.name} * INTERVAL '7' day)"
    -347            return super().interval_sql(expression)
    +            
    330        def interval_sql(self, expression: exp.Interval) -> str:
    +331            unit = self.sql(expression, "unit")
    +332            if expression.this and unit.lower().startswith("week"):
    +333                return f"({expression.this.name} * INTERVAL '7' day)"
    +334            return super().interval_sql(expression)
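
Stepping back from the individual overrides, the generator options documented above are normally passed through sqlglot's top-level helpers. A hedged sketch (the query is made up; formatting details may vary by version):

    import sqlglot

    print(
        sqlglot.transpile(
            "select col from tbl where col > 1",
            write="presto",
            pretty=True,                  # format the output across multiple lines
            identify=True,                # always quote identifiers
            normalize_functions="upper",  # upper-case function names
        )[0]
    )
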
     
    @@ -1049,10 +1032,10 @@ Default: True
    -
    349        def transaction_sql(self, expression: exp.Transaction) -> str:
    -350            modes = expression.args.get("modes")
    -351            modes = f" {', '.join(modes)}" if modes else ""
    -352            return f"START TRANSACTION{modes}"
    +            
    336        def transaction_sql(self, expression: exp.Transaction) -> str:
    +337            modes = expression.args.get("modes")
    +338            modes = f" {', '.join(modes)}" if modes else ""
    +339            return f"START TRANSACTION{modes}"
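
For context, a hedged example (not from the patch) of what transaction_sql produces; the behavior of the reading dialect's parser is assumed here:

    import sqlglot

    # A BEGIN statement parsed from Postgres should come out as START TRANSACTION in Presto.
    print(sqlglot.transpile("BEGIN", read="postgres", write="presto")[0])
    # Expected (approximately): START TRANSACTION
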
     
    @@ -1070,27 +1053,51 @@ Default: True
    -
    354        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    -355            start = expression.args["start"]
    -356            end = expression.args["end"]
    -357            step = expression.args.get("step")
    -358
    -359            if isinstance(start, exp.Cast):
    -360                target_type = start.to
    -361            elif isinstance(end, exp.Cast):
    -362                target_type = end.to
    -363            else:
    -364                target_type = None
    -365
    -366            if target_type and target_type.is_type("timestamp"):
    -367                to = target_type.copy()
    -368
    -369                if target_type is start.to:
    -370                    end = exp.Cast(this=end, to=to)
    -371                else:
    -372                    start = exp.Cast(this=start, to=to)
    -373
    -374            return self.func("SEQUENCE", start, end, step)
    +            
    341        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    +342            start = expression.args["start"]
    +343            end = expression.args["end"]
    +344            step = expression.args.get("step")
    +345
    +346            if isinstance(start, exp.Cast):
    +347                target_type = start.to
    +348            elif isinstance(end, exp.Cast):
    +349                target_type = end.to
    +350            else:
    +351                target_type = None
    +352
    +353            if target_type and target_type.is_type("timestamp"):
    +354                to = target_type.copy()
    +355
    +356                if target_type is start.to:
    +357                    end = exp.cast(end, to)
    +358                else:
    +359                    start = exp.cast(start, to)
    +360
    +361            return self.func("SEQUENCE", start, end, step)
    +
    +
    +    def offset_limit_modifiers(self, expression: sqlglot.expressions.Expression, fetch: bool, limit: Union[sqlglot.expressions.Fetch, sqlglot.expressions.Limit, NoneType]) -> List[str]:
    +
    363        def offset_limit_modifiers(
    +364            self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
    +365        ) -> t.List[str]:
    +366            return [
    +367                self.sql(expression, "offset"),
    +368                self.sql(limit),
    +369            ]
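
Two hedged examples (not part of the patch) of the behavior implemented above; table and column names are made up and the exact output may differ by version:

    import sqlglot

    # generateseries_sql renders GenerateSeries via SEQUENCE.
    print(sqlglot.transpile("SELECT * FROM GENERATE_SERIES(1, 5)", read="postgres", write="presto")[0])
    # Expected (approximately): SELECT * FROM UNNEST(SEQUENCE(1, 5))

    # The new offset_limit_modifiers emits OFFSET before LIMIT, the order Presto expects.
    print(sqlglot.transpile("SELECT x FROM t LIMIT 10 OFFSET 5", read="mysql", write="presto")[0])
    # Expected (approximately): SELECT x FROM t OFFSET 5 LIMIT 10
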
     
    @@ -1126,6 +1133,7 @@ Default: True
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +
    createable_sql
    create_sql
    clone_sql
    describe_sql
    @@ -1212,6 +1220,7 @@ Default: True
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -1236,7 +1245,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    +
    safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -1285,6 +1294,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -1333,6 +1343,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
    diff --git a/docs/sqlglot/dialects/redshift.html b/docs/sqlglot/dialects/redshift.html index 9ba45c1..dfee18f 100644 --- a/docs/sqlglot/dialects/redshift.html +++ b/docs/sqlglot/dialects/redshift.html @@ -91,172 +91,178 @@ 3import typing as t 4 5from sqlglot import exp, transforms - 6from sqlglot.dialects.postgres import Postgres - 7from sqlglot.helper import seq_get - 8from sqlglot.tokens import TokenType - 9 + 6from sqlglot.dialects.dialect import concat_to_dpipe_sql, rename_func + 7from sqlglot.dialects.postgres import Postgres + 8from sqlglot.helper import seq_get + 9from sqlglot.tokens import TokenType 10 - 11def _json_sql(self: Postgres.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str: - 12 return f'{self.sql(expression, "this")}."{expression.expression.name}"' - 13 + 11 + 12def _json_sql(self: Postgres.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str: + 13 return f'{self.sql(expression, "this")}."{expression.expression.name}"' 14 - 15class Redshift(Postgres): - 16 time_format = "'YYYY-MM-DD HH:MI:SS'" - 17 time_mapping = { - 18 **Postgres.time_mapping, - 19 "MON": "%b", - 20 "HH": "%H", - 21 } - 22 - 23 class Parser(Postgres.Parser): - 24 FUNCTIONS = { - 25 **Postgres.Parser.FUNCTIONS, - 26 "DATEADD": lambda args: exp.DateAdd( - 27 this=seq_get(args, 2), - 28 expression=seq_get(args, 1), - 29 unit=seq_get(args, 0), - 30 ), - 31 "DATEDIFF": lambda args: exp.DateDiff( - 32 this=seq_get(args, 2), - 33 expression=seq_get(args, 1), - 34 unit=seq_get(args, 0), - 35 ), - 36 "NVL": exp.Coalesce.from_arg_list, - 37 } - 38 - 39 CONVERT_TYPE_FIRST = True + 15 + 16class Redshift(Postgres): + 17 TIME_FORMAT = "'YYYY-MM-DD HH:MI:SS'" + 18 TIME_MAPPING = { + 19 **Postgres.TIME_MAPPING, + 20 "MON": "%b", + 21 "HH": "%H", + 22 } + 23 + 24 class Parser(Postgres.Parser): + 25 FUNCTIONS = { + 26 **Postgres.Parser.FUNCTIONS, + 27 "DATEADD": lambda args: exp.DateAdd( + 28 this=exp.TsOrDsToDate(this=seq_get(args, 2)), + 29 expression=seq_get(args, 1), + 30 unit=seq_get(args, 0), + 31 ), + 32 "DATEDIFF": lambda args: exp.DateDiff( + 33 this=exp.TsOrDsToDate(this=seq_get(args, 2)), + 34 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), + 35 unit=seq_get(args, 0), + 36 ), + 37 "NVL": exp.Coalesce.from_arg_list, + 38 "STRTOL": exp.FromBase.from_arg_list, + 39 } 40 - 41 def _parse_types( - 42 self, check_func: bool = False, schema: bool = False - 43 ) -> t.Optional[exp.Expression]: - 44 this = super()._parse_types(check_func=check_func, schema=schema) - 45 - 46 if ( - 47 isinstance(this, exp.DataType) - 48 and this.is_type("varchar") - 49 and this.expressions - 50 and this.expressions[0].this == exp.column("MAX") - 51 ): - 52 this.set("expressions", [exp.Var(this="MAX")]) - 53 - 54 return this + 41 CONVERT_TYPE_FIRST = True + 42 + 43 def _parse_types( + 44 self, check_func: bool = False, schema: bool = False + 45 ) -> t.Optional[exp.Expression]: + 46 this = super()._parse_types(check_func=check_func, schema=schema) + 47 + 48 if ( + 49 isinstance(this, exp.DataType) + 50 and this.is_type("varchar") + 51 and this.expressions + 52 and this.expressions[0].this == exp.column("MAX") + 53 ): + 54 this.set("expressions", [exp.var("MAX")]) 55 - 56 class Tokenizer(Postgres.Tokenizer): - 57 BIT_STRINGS = [] - 58 HEX_STRINGS = [] - 59 STRING_ESCAPES = ["\\"] - 60 - 61 KEYWORDS = { - 62 **Postgres.Tokenizer.KEYWORDS, - 63 "HLLSKETCH": TokenType.HLLSKETCH, - 64 "SUPER": TokenType.SUPER, - 65 "SYSDATE": TokenType.CURRENT_TIMESTAMP, - 66 "TIME": TokenType.TIMESTAMP, - 67 
"TIMETZ": TokenType.TIMESTAMPTZ, - 68 "TOP": TokenType.TOP, - 69 "UNLOAD": TokenType.COMMAND, - 70 "VARBYTE": TokenType.VARBINARY, - 71 } - 72 - 73 # Redshift allows # to appear as a table identifier prefix - 74 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy() - 75 SINGLE_TOKENS.pop("#") - 76 - 77 class Generator(Postgres.Generator): - 78 LOCKING_READS_SUPPORTED = False - 79 RENAME_TABLE_WITH_DB = False - 80 - 81 TYPE_MAPPING = { - 82 **Postgres.Generator.TYPE_MAPPING, - 83 exp.DataType.Type.BINARY: "VARBYTE", - 84 exp.DataType.Type.VARBINARY: "VARBYTE", - 85 exp.DataType.Type.INT: "INTEGER", - 86 } - 87 - 88 PROPERTIES_LOCATION = { - 89 **Postgres.Generator.PROPERTIES_LOCATION, - 90 exp.LikeProperty: exp.Properties.Location.POST_WITH, - 91 } - 92 - 93 TRANSFORMS = { - 94 **Postgres.Generator.TRANSFORMS, - 95 exp.CurrentTimestamp: lambda self, e: "SYSDATE", - 96 exp.DateAdd: lambda self, e: self.func( - 97 "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this - 98 ), - 99 exp.DateDiff: lambda self, e: self.func( -100 "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this + 56 return this + 57 + 58 class Tokenizer(Postgres.Tokenizer): + 59 BIT_STRINGS = [] + 60 HEX_STRINGS = [] + 61 STRING_ESCAPES = ["\\"] + 62 + 63 KEYWORDS = { + 64 **Postgres.Tokenizer.KEYWORDS, + 65 "HLLSKETCH": TokenType.HLLSKETCH, + 66 "SUPER": TokenType.SUPER, + 67 "SYSDATE": TokenType.CURRENT_TIMESTAMP, + 68 "TIME": TokenType.TIMESTAMP, + 69 "TIMETZ": TokenType.TIMESTAMPTZ, + 70 "TOP": TokenType.TOP, + 71 "UNLOAD": TokenType.COMMAND, + 72 "VARBYTE": TokenType.VARBINARY, + 73 } + 74 + 75 # Redshift allows # to appear as a table identifier prefix + 76 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy() + 77 SINGLE_TOKENS.pop("#") + 78 + 79 class Generator(Postgres.Generator): + 80 LOCKING_READS_SUPPORTED = False + 81 RENAME_TABLE_WITH_DB = False + 82 + 83 TYPE_MAPPING = { + 84 **Postgres.Generator.TYPE_MAPPING, + 85 exp.DataType.Type.BINARY: "VARBYTE", + 86 exp.DataType.Type.VARBINARY: "VARBYTE", + 87 exp.DataType.Type.INT: "INTEGER", + 88 } + 89 + 90 PROPERTIES_LOCATION = { + 91 **Postgres.Generator.PROPERTIES_LOCATION, + 92 exp.LikeProperty: exp.Properties.Location.POST_WITH, + 93 } + 94 + 95 TRANSFORMS = { + 96 **Postgres.Generator.TRANSFORMS, + 97 exp.Concat: concat_to_dpipe_sql, + 98 exp.CurrentTimestamp: lambda self, e: "SYSDATE", + 99 exp.DateAdd: lambda self, e: self.func( +100 "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this 101 ), -102 exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})", -103 exp.DistStyleProperty: lambda self, e: self.naked_property(e), -104 exp.JSONExtract: _json_sql, -105 exp.JSONExtractScalar: _json_sql, -106 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), -107 exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})", -108 } -109 -110 # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots -111 TRANSFORMS.pop(exp.Pivot) -112 -113 # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres) -114 TRANSFORMS.pop(exp.Pow) +102 exp.DateDiff: lambda self, e: self.func( +103 "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this +104 ), +105 exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})", +106 exp.DistStyleProperty: lambda self, e: self.naked_property(e), +107 exp.FromBase: rename_func("STRTOL"), +108 exp.JSONExtract: _json_sql, +109 exp.JSONExtractScalar: _json_sql, +110 exp.SafeConcat: 
concat_to_dpipe_sql, +111 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), +112 exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})", +113 exp.TsOrDsToDate: lambda self, e: self.sql(e.this), +114 } 115 -116 RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"} -117 -118 def values_sql(self, expression: exp.Values) -> str: -119 """ -120 Converts `VALUES...` expression into a series of unions. +116 # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots +117 TRANSFORMS.pop(exp.Pivot) +118 +119 # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres) +120 TRANSFORMS.pop(exp.Pow) 121 -122 Note: If you have a lot of unions then this will result in a large number of recursive statements to -123 evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be -124 very slow. -125 """ -126 -127 # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example -128 if not expression.find_ancestor(exp.From, exp.Join): -129 return super().values_sql(expression) -130 -131 column_names = expression.alias and expression.args["alias"].columns +122 RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"} +123 +124 def values_sql(self, expression: exp.Values) -> str: +125 """ +126 Converts `VALUES...` expression into a series of unions. +127 +128 Note: If you have a lot of unions then this will result in a large number of recursive statements to +129 evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be +130 very slow. +131 """ 132 -133 selects = [] -134 rows = [tuple_exp.expressions for tuple_exp in expression.expressions] -135 -136 for i, row in enumerate(rows): -137 if i == 0 and column_names: -138 row = [ -139 exp.alias_(value, column_name) -140 for value, column_name in zip(row, column_names) -141 ] -142 -143 selects.append(exp.Select(expressions=row)) -144 -145 subquery_expression: exp.Select | exp.Union = selects[0] -146 if len(selects) > 1: -147 for select in selects[1:]: -148 subquery_expression = exp.union(subquery_expression, select, distinct=False) -149 -150 return self.subquery_sql(subquery_expression.subquery(expression.alias)) -151 -152 def with_properties(self, properties: exp.Properties) -> str: -153 """Redshift doesn't have `WITH` as part of their with_properties so we remove it""" -154 return self.properties(properties, prefix=" ", suffix="") +133 # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example +134 if not expression.find_ancestor(exp.From, exp.Join): +135 return super().values_sql(expression) +136 +137 column_names = expression.alias and expression.args["alias"].columns +138 +139 selects = [] +140 rows = [tuple_exp.expressions for tuple_exp in expression.expressions] +141 +142 for i, row in enumerate(rows): +143 if i == 0 and column_names: +144 row = [ +145 exp.alias_(value, column_name) +146 for value, column_name in zip(row, column_names) +147 ] +148 +149 selects.append(exp.Select(expressions=row)) +150 +151 subquery_expression: exp.Select | exp.Union = selects[0] +152 if len(selects) > 1: +153 for select in selects[1:]: +154 subquery_expression = exp.union(subquery_expression, select, distinct=False) 155 -156 def datatype_sql(self, expression: exp.DataType) -> str: -157 """ -158 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally 
mean -159 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type -160 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert -161 `TEXT` to `VARCHAR`. -162 """ -163 if expression.is_type("text"): -164 expression = expression.copy() -165 expression.set("this", exp.DataType.Type.VARCHAR) -166 precision = expression.args.get("expressions") -167 -168 if not precision: -169 expression.append("expressions", exp.Var(this="MAX")) -170 -171 return super().datatype_sql(expression) +156 return self.subquery_sql(subquery_expression.subquery(expression.alias)) +157 +158 def with_properties(self, properties: exp.Properties) -> str: +159 """Redshift doesn't have `WITH` as part of their with_properties so we remove it""" +160 return self.properties(properties, prefix=" ", suffix="") +161 +162 def datatype_sql(self, expression: exp.DataType) -> str: +163 """ +164 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean +165 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type +166 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert +167 `TEXT` to `VARCHAR`. +168 """ +169 if expression.is_type("text"): +170 expression = expression.copy() +171 expression.set("this", exp.DataType.Type.VARCHAR) +172 precision = expression.args.get("expressions") +173 +174 if not precision: +175 expression.append("expressions", exp.var("MAX")) +176 +177 return super().datatype_sql(expression)
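
To make the Redshift changes above concrete, here is a hedged sketch (not part of the patch) of how they are expected to surface through the public API; the identifiers and literals are made up and output may differ slightly by version:

    import sqlglot

    # concat_to_dpipe_sql rewrites CONCAT (and SafeConcat) to the || operator.
    print(sqlglot.transpile("SELECT CONCAT(a, b)", read="redshift", write="redshift")[0])
    # Expected (approximately): SELECT a || b

    # STRTOL now round-trips through the new exp.FromBase expression.
    print(sqlglot.transpile("SELECT STRTOL('abc', 16)", read="redshift", write="redshift")[0])
    # Expected (approximately): SELECT STRTOL('abc', 16)
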
    @@ -272,163 +278,168 @@
    -
     16class Redshift(Postgres):
    - 17    time_format = "'YYYY-MM-DD HH:MI:SS'"
    - 18    time_mapping = {
    - 19        **Postgres.time_mapping,
    - 20        "MON": "%b",
    - 21        "HH": "%H",
    - 22    }
    - 23
    - 24    class Parser(Postgres.Parser):
    - 25        FUNCTIONS = {
    - 26            **Postgres.Parser.FUNCTIONS,
    - 27            "DATEADD": lambda args: exp.DateAdd(
    - 28                this=seq_get(args, 2),
    - 29                expression=seq_get(args, 1),
    - 30                unit=seq_get(args, 0),
    - 31            ),
    - 32            "DATEDIFF": lambda args: exp.DateDiff(
    - 33                this=seq_get(args, 2),
    - 34                expression=seq_get(args, 1),
    - 35                unit=seq_get(args, 0),
    - 36            ),
    - 37            "NVL": exp.Coalesce.from_arg_list,
    - 38        }
    - 39
    - 40        CONVERT_TYPE_FIRST = True
    +            
     17class Redshift(Postgres):
    + 18    TIME_FORMAT = "'YYYY-MM-DD HH:MI:SS'"
    + 19    TIME_MAPPING = {
    + 20        **Postgres.TIME_MAPPING,
    + 21        "MON": "%b",
    + 22        "HH": "%H",
    + 23    }
    + 24
    + 25    class Parser(Postgres.Parser):
    + 26        FUNCTIONS = {
    + 27            **Postgres.Parser.FUNCTIONS,
    + 28            "DATEADD": lambda args: exp.DateAdd(
    + 29                this=exp.TsOrDsToDate(this=seq_get(args, 2)),
    + 30                expression=seq_get(args, 1),
    + 31                unit=seq_get(args, 0),
    + 32            ),
    + 33            "DATEDIFF": lambda args: exp.DateDiff(
    + 34                this=exp.TsOrDsToDate(this=seq_get(args, 2)),
    + 35                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
    + 36                unit=seq_get(args, 0),
    + 37            ),
    + 38            "NVL": exp.Coalesce.from_arg_list,
    + 39            "STRTOL": exp.FromBase.from_arg_list,
    + 40        }
      41
    - 42        def _parse_types(
    - 43            self, check_func: bool = False, schema: bool = False
    - 44        ) -> t.Optional[exp.Expression]:
    - 45            this = super()._parse_types(check_func=check_func, schema=schema)
    - 46
    - 47            if (
    - 48                isinstance(this, exp.DataType)
    - 49                and this.is_type("varchar")
    - 50                and this.expressions
    - 51                and this.expressions[0].this == exp.column("MAX")
    - 52            ):
    - 53                this.set("expressions", [exp.Var(this="MAX")])
    - 54
    - 55            return this
    + 42        CONVERT_TYPE_FIRST = True
    + 43
    + 44        def _parse_types(
    + 45            self, check_func: bool = False, schema: bool = False
    + 46        ) -> t.Optional[exp.Expression]:
    + 47            this = super()._parse_types(check_func=check_func, schema=schema)
    + 48
    + 49            if (
    + 50                isinstance(this, exp.DataType)
    + 51                and this.is_type("varchar")
    + 52                and this.expressions
    + 53                and this.expressions[0].this == exp.column("MAX")
    + 54            ):
    + 55                this.set("expressions", [exp.var("MAX")])
      56
    - 57    class Tokenizer(Postgres.Tokenizer):
    - 58        BIT_STRINGS = []
    - 59        HEX_STRINGS = []
    - 60        STRING_ESCAPES = ["\\"]
    - 61
    - 62        KEYWORDS = {
    - 63            **Postgres.Tokenizer.KEYWORDS,
    - 64            "HLLSKETCH": TokenType.HLLSKETCH,
    - 65            "SUPER": TokenType.SUPER,
    - 66            "SYSDATE": TokenType.CURRENT_TIMESTAMP,
    - 67            "TIME": TokenType.TIMESTAMP,
    - 68            "TIMETZ": TokenType.TIMESTAMPTZ,
    - 69            "TOP": TokenType.TOP,
    - 70            "UNLOAD": TokenType.COMMAND,
    - 71            "VARBYTE": TokenType.VARBINARY,
    - 72        }
    - 73
    - 74        # Redshift allows # to appear as a table identifier prefix
    - 75        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
    - 76        SINGLE_TOKENS.pop("#")
    - 77
    - 78    class Generator(Postgres.Generator):
    - 79        LOCKING_READS_SUPPORTED = False
    - 80        RENAME_TABLE_WITH_DB = False
    - 81
    - 82        TYPE_MAPPING = {
    - 83            **Postgres.Generator.TYPE_MAPPING,
    - 84            exp.DataType.Type.BINARY: "VARBYTE",
    - 85            exp.DataType.Type.VARBINARY: "VARBYTE",
    - 86            exp.DataType.Type.INT: "INTEGER",
    - 87        }
    - 88
    - 89        PROPERTIES_LOCATION = {
    - 90            **Postgres.Generator.PROPERTIES_LOCATION,
    - 91            exp.LikeProperty: exp.Properties.Location.POST_WITH,
    - 92        }
    - 93
    - 94        TRANSFORMS = {
    - 95            **Postgres.Generator.TRANSFORMS,
    - 96            exp.CurrentTimestamp: lambda self, e: "SYSDATE",
    - 97            exp.DateAdd: lambda self, e: self.func(
    - 98                "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
    - 99            ),
    -100            exp.DateDiff: lambda self, e: self.func(
    -101                "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
    + 57            return this
    + 58
    + 59    class Tokenizer(Postgres.Tokenizer):
    + 60        BIT_STRINGS = []
    + 61        HEX_STRINGS = []
    + 62        STRING_ESCAPES = ["\\"]
    + 63
    + 64        KEYWORDS = {
    + 65            **Postgres.Tokenizer.KEYWORDS,
    + 66            "HLLSKETCH": TokenType.HLLSKETCH,
    + 67            "SUPER": TokenType.SUPER,
    + 68            "SYSDATE": TokenType.CURRENT_TIMESTAMP,
    + 69            "TIME": TokenType.TIMESTAMP,
    + 70            "TIMETZ": TokenType.TIMESTAMPTZ,
    + 71            "TOP": TokenType.TOP,
    + 72            "UNLOAD": TokenType.COMMAND,
    + 73            "VARBYTE": TokenType.VARBINARY,
    + 74        }
    + 75
    + 76        # Redshift allows # to appear as a table identifier prefix
    + 77        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
    + 78        SINGLE_TOKENS.pop("#")
    + 79
    + 80    class Generator(Postgres.Generator):
    + 81        LOCKING_READS_SUPPORTED = False
    + 82        RENAME_TABLE_WITH_DB = False
    + 83
    + 84        TYPE_MAPPING = {
    + 85            **Postgres.Generator.TYPE_MAPPING,
    + 86            exp.DataType.Type.BINARY: "VARBYTE",
    + 87            exp.DataType.Type.VARBINARY: "VARBYTE",
    + 88            exp.DataType.Type.INT: "INTEGER",
    + 89        }
    + 90
    + 91        PROPERTIES_LOCATION = {
    + 92            **Postgres.Generator.PROPERTIES_LOCATION,
    + 93            exp.LikeProperty: exp.Properties.Location.POST_WITH,
    + 94        }
    + 95
    + 96        TRANSFORMS = {
    + 97            **Postgres.Generator.TRANSFORMS,
    + 98            exp.Concat: concat_to_dpipe_sql,
    + 99            exp.CurrentTimestamp: lambda self, e: "SYSDATE",
    +100            exp.DateAdd: lambda self, e: self.func(
    +101                "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
     102            ),
    -103            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
    -104            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
    -105            exp.JSONExtract: _json_sql,
    -106            exp.JSONExtractScalar: _json_sql,
    -107            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -108            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
    -109        }
    -110
    -111        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
    -112        TRANSFORMS.pop(exp.Pivot)
    -113
    -114        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
    -115        TRANSFORMS.pop(exp.Pow)
    +103            exp.DateDiff: lambda self, e: self.func(
    +104                "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
    +105            ),
    +106            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
    +107            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
    +108            exp.FromBase: rename_func("STRTOL"),
    +109            exp.JSONExtract: _json_sql,
    +110            exp.JSONExtractScalar: _json_sql,
    +111            exp.SafeConcat: concat_to_dpipe_sql,
    +112            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +113            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
    +114            exp.TsOrDsToDate: lambda self, e: self.sql(e.this),
    +115        }
     116
    -117        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
    -118
    -119        def values_sql(self, expression: exp.Values) -> str:
    -120            """
    -121            Converts `VALUES...` expression into a series of unions.
    +117        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
    +118        TRANSFORMS.pop(exp.Pivot)
    +119
    +120        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
    +121        TRANSFORMS.pop(exp.Pow)
     122
    -123            Note: If you have a lot of unions then this will result in a large number of recursive statements to
    -124            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
    -125            very slow.
    -126            """
    -127
    -128            # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example
    -129            if not expression.find_ancestor(exp.From, exp.Join):
    -130                return super().values_sql(expression)
    -131
    -132            column_names = expression.alias and expression.args["alias"].columns
    +123        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
    +124
    +125        def values_sql(self, expression: exp.Values) -> str:
    +126            """
    +127            Converts `VALUES...` expression into a series of unions.
    +128
    +129            Note: If you have a lot of unions then this will result in a large number of recursive statements to
    +130            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
    +131            very slow.
    +132            """
     133
    -134            selects = []
    -135            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
    -136
    -137            for i, row in enumerate(rows):
    -138                if i == 0 and column_names:
    -139                    row = [
    -140                        exp.alias_(value, column_name)
    -141                        for value, column_name in zip(row, column_names)
    -142                    ]
    -143
    -144                selects.append(exp.Select(expressions=row))
    -145
    -146            subquery_expression: exp.Select | exp.Union = selects[0]
    -147            if len(selects) > 1:
    -148                for select in selects[1:]:
    -149                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
    -150
    -151            return self.subquery_sql(subquery_expression.subquery(expression.alias))
    -152
    -153        def with_properties(self, properties: exp.Properties) -> str:
    -154            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
    -155            return self.properties(properties, prefix=" ", suffix="")
    +134            # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example
    +135            if not expression.find_ancestor(exp.From, exp.Join):
    +136                return super().values_sql(expression)
    +137
    +138            column_names = expression.alias and expression.args["alias"].columns
    +139
    +140            selects = []
    +141            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
    +142
    +143            for i, row in enumerate(rows):
    +144                if i == 0 and column_names:
    +145                    row = [
    +146                        exp.alias_(value, column_name)
    +147                        for value, column_name in zip(row, column_names)
    +148                    ]
    +149
    +150                selects.append(exp.Select(expressions=row))
    +151
    +152            subquery_expression: exp.Select | exp.Union = selects[0]
    +153            if len(selects) > 1:
    +154                for select in selects[1:]:
    +155                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
     156
    -157        def datatype_sql(self, expression: exp.DataType) -> str:
    -158            """
    -159            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    -160            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    -161            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    -162            `TEXT` to `VARCHAR`.
    -163            """
    -164            if expression.is_type("text"):
    -165                expression = expression.copy()
    -166                expression.set("this", exp.DataType.Type.VARCHAR)
    -167                precision = expression.args.get("expressions")
    -168
    -169                if not precision:
    -170                    expression.append("expressions", exp.Var(this="MAX"))
    -171
    -172            return super().datatype_sql(expression)
    +157            return self.subquery_sql(subquery_expression.subquery(expression.alias))
    +158
    +159        def with_properties(self, properties: exp.Properties) -> str:
    +160            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
    +161            return self.properties(properties, prefix=" ", suffix="")
    +162
    +163        def datatype_sql(self, expression: exp.DataType) -> str:
    +164            """
    +165            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    +166            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    +167            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    +168            `TEXT` to `VARCHAR`.
    +169            """
    +170            if expression.is_type("text"):
    +171                expression = expression.copy()
    +172                expression.set("this", exp.DataType.Type.VARCHAR)
    +173                precision = expression.args.get("expressions")
    +174
    +175                if not precision:
    +176                    expression.append("expressions", exp.var("MAX"))
    +177
    +178            return super().datatype_sql(expression)
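
A hedged illustration (not from the patch) of the values_sql override documented above; the alias and literals are made up, and column-aliasing details may vary by version:

    import sqlglot

    # A VALUES list used as a relation is expanded into a chain of UNION ALL selects,
    # since Redshift does not support VALUES in the FROM clause.
    sql = "SELECT a, b FROM (VALUES (1, 'x'), (2, 'y')) AS t(a, b)"
    print(sqlglot.transpile(sql, read="presto", write="redshift")[0])
    # Expected (approximately):
    # SELECT a, b FROM (SELECT 1 AS a, 'x' AS b UNION ALL SELECT 2, 'y') AS t
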
     
    @@ -463,62 +474,55 @@
    -
    24    class Parser(Postgres.Parser):
    -25        FUNCTIONS = {
    -26            **Postgres.Parser.FUNCTIONS,
    -27            "DATEADD": lambda args: exp.DateAdd(
    -28                this=seq_get(args, 2),
    -29                expression=seq_get(args, 1),
    -30                unit=seq_get(args, 0),
    -31            ),
    -32            "DATEDIFF": lambda args: exp.DateDiff(
    -33                this=seq_get(args, 2),
    -34                expression=seq_get(args, 1),
    -35                unit=seq_get(args, 0),
    -36            ),
    -37            "NVL": exp.Coalesce.from_arg_list,
    -38        }
    -39
    -40        CONVERT_TYPE_FIRST = True
    +            
    25    class Parser(Postgres.Parser):
    +26        FUNCTIONS = {
    +27            **Postgres.Parser.FUNCTIONS,
    +28            "DATEADD": lambda args: exp.DateAdd(
    +29                this=exp.TsOrDsToDate(this=seq_get(args, 2)),
    +30                expression=seq_get(args, 1),
    +31                unit=seq_get(args, 0),
    +32            ),
    +33            "DATEDIFF": lambda args: exp.DateDiff(
    +34                this=exp.TsOrDsToDate(this=seq_get(args, 2)),
    +35                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
    +36                unit=seq_get(args, 0),
    +37            ),
    +38            "NVL": exp.Coalesce.from_arg_list,
    +39            "STRTOL": exp.FromBase.from_arg_list,
    +40        }
     41
    -42        def _parse_types(
    -43            self, check_func: bool = False, schema: bool = False
    -44        ) -> t.Optional[exp.Expression]:
    -45            this = super()._parse_types(check_func=check_func, schema=schema)
    -46
    -47            if (
    -48                isinstance(this, exp.DataType)
    -49                and this.is_type("varchar")
    -50                and this.expressions
    -51                and this.expressions[0].this == exp.column("MAX")
    -52            ):
    -53                this.set("expressions", [exp.Var(this="MAX")])
    -54
    -55            return this
    +42        CONVERT_TYPE_FIRST = True
    +43
    +44        def _parse_types(
    +45            self, check_func: bool = False, schema: bool = False
    +46        ) -> t.Optional[exp.Expression]:
    +47            this = super()._parse_types(check_func=check_func, schema=schema)
    +48
    +49            if (
    +50                isinstance(this, exp.DataType)
    +51                and this.is_type("varchar")
    +52                and this.expressions
    +53                and this.expressions[0].this == exp.column("MAX")
    +54            ):
    +55                this.set("expressions", [exp.var("MAX")])
    +56
    +57            return this
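
A hedged sketch (not part of the patch) of what the DATEADD/DATEDIFF changes above mean at the AST level; the date literal is made up:

    import sqlglot
    from sqlglot import exp

    # The date argument is now wrapped in exp.TsOrDsToDate so that string dates
    # are coerced consistently when transpiling to other dialects.
    node = sqlglot.parse_one("DATEADD(day, 3, '2023-01-01')", read="redshift")
    assert isinstance(node, exp.DateAdd)
    assert isinstance(node.this, exp.TsOrDsToDate)

    # Generating Redshift again unwraps it via the generator's exp.TsOrDsToDate transform.
    print(node.sql(dialect="redshift"))
    # Expected (approximately): DATEADD(day, 3, '2023-01-01')
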
     
    -

    Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces -a parsed syntax tree.

    +

    Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

    Arguments:
      -
    • error_level: the desired error level. +
    • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
    • -
    • error_message_context: determines the amount of context to capture from a +
    • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). -Default: 50.
    • -
    • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. -Default: 0
    • -
    • alias_post_tablesample: If the table alias comes after tablesample. -Default: False
    • +Default: 100
    • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
    • -
    • null_ordering: Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
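
These parser options are normally supplied through sqlglot's top-level helpers; a hedged sketch (not from the patch), using a deliberately malformed query:

    import sqlglot
    from sqlglot import ErrorLevel

    try:
        sqlglot.parse_one(
            "SELECT 1 +",                  # incomplete expression
            read="redshift",
            error_level=ErrorLevel.RAISE,  # collect errors, then raise
            error_message_context=100,
            max_errors=3,
        )
    except sqlglot.ParseError as e:
        print(e)
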
    @@ -551,26 +555,26 @@ Default: "nulls_are_small"
    -
    57    class Tokenizer(Postgres.Tokenizer):
    -58        BIT_STRINGS = []
    -59        HEX_STRINGS = []
    -60        STRING_ESCAPES = ["\\"]
    -61
    -62        KEYWORDS = {
    -63            **Postgres.Tokenizer.KEYWORDS,
    -64            "HLLSKETCH": TokenType.HLLSKETCH,
    -65            "SUPER": TokenType.SUPER,
    -66            "SYSDATE": TokenType.CURRENT_TIMESTAMP,
    -67            "TIME": TokenType.TIMESTAMP,
    -68            "TIMETZ": TokenType.TIMESTAMPTZ,
    -69            "TOP": TokenType.TOP,
    -70            "UNLOAD": TokenType.COMMAND,
    -71            "VARBYTE": TokenType.VARBINARY,
    -72        }
    -73
    -74        # Redshift allows # to appear as a table identifier prefix
    -75        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
    -76        SINGLE_TOKENS.pop("#")
    +            
    59    class Tokenizer(Postgres.Tokenizer):
    +60        BIT_STRINGS = []
    +61        HEX_STRINGS = []
    +62        STRING_ESCAPES = ["\\"]
    +63
    +64        KEYWORDS = {
    +65            **Postgres.Tokenizer.KEYWORDS,
    +66            "HLLSKETCH": TokenType.HLLSKETCH,
    +67            "SUPER": TokenType.SUPER,
    +68            "SYSDATE": TokenType.CURRENT_TIMESTAMP,
    +69            "TIME": TokenType.TIMESTAMP,
    +70            "TIMETZ": TokenType.TIMESTAMPTZ,
    +71            "TOP": TokenType.TOP,
    +72            "UNLOAD": TokenType.COMMAND,
    +73            "VARBYTE": TokenType.VARBINARY,
    +74        }
    +75
    +76        # Redshift allows # to appear as a table identifier prefix
    +77        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
    +78        SINGLE_TOKENS.pop("#")
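
The SINGLE_TOKENS tweak above is what lets a leading "#" stay attached to a table name, as Redshift uses for temporary tables. A hedged sketch (not from the patch; the table name is made up):

    import sqlglot

    # "#" is no longer tokenized on its own for Redshift, so #stage_table parses
    # and regenerates as a single identifier.
    print(sqlglot.transpile("SELECT * FROM #stage_table", read="redshift", write="redshift")[0])
    # Expected (approximately): SELECT * FROM #stage_table
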
     
    @@ -582,6 +586,7 @@ Default: "nulls_are_small"
    @@ -598,147 +603,136 @@ Default: "nulls_are_small"
    -
     78    class Generator(Postgres.Generator):
    - 79        LOCKING_READS_SUPPORTED = False
    - 80        RENAME_TABLE_WITH_DB = False
    - 81
    - 82        TYPE_MAPPING = {
    - 83            **Postgres.Generator.TYPE_MAPPING,
    - 84            exp.DataType.Type.BINARY: "VARBYTE",
    - 85            exp.DataType.Type.VARBINARY: "VARBYTE",
    - 86            exp.DataType.Type.INT: "INTEGER",
    - 87        }
    - 88
    - 89        PROPERTIES_LOCATION = {
    - 90            **Postgres.Generator.PROPERTIES_LOCATION,
    - 91            exp.LikeProperty: exp.Properties.Location.POST_WITH,
    - 92        }
    - 93
    - 94        TRANSFORMS = {
    - 95            **Postgres.Generator.TRANSFORMS,
    - 96            exp.CurrentTimestamp: lambda self, e: "SYSDATE",
    - 97            exp.DateAdd: lambda self, e: self.func(
    - 98                "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
    - 99            ),
    -100            exp.DateDiff: lambda self, e: self.func(
    -101                "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
    +            
     80    class Generator(Postgres.Generator):
    + 81        LOCKING_READS_SUPPORTED = False
    + 82        RENAME_TABLE_WITH_DB = False
    + 83
    + 84        TYPE_MAPPING = {
    + 85            **Postgres.Generator.TYPE_MAPPING,
    + 86            exp.DataType.Type.BINARY: "VARBYTE",
    + 87            exp.DataType.Type.VARBINARY: "VARBYTE",
    + 88            exp.DataType.Type.INT: "INTEGER",
    + 89        }
    + 90
    + 91        PROPERTIES_LOCATION = {
    + 92            **Postgres.Generator.PROPERTIES_LOCATION,
    + 93            exp.LikeProperty: exp.Properties.Location.POST_WITH,
    + 94        }
    + 95
    + 96        TRANSFORMS = {
    + 97            **Postgres.Generator.TRANSFORMS,
    + 98            exp.Concat: concat_to_dpipe_sql,
    + 99            exp.CurrentTimestamp: lambda self, e: "SYSDATE",
    +100            exp.DateAdd: lambda self, e: self.func(
    +101                "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
     102            ),
    -103            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
    -104            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
    -105            exp.JSONExtract: _json_sql,
    -106            exp.JSONExtractScalar: _json_sql,
    -107            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -108            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
    -109        }
    -110
    -111        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
    -112        TRANSFORMS.pop(exp.Pivot)
    -113
    -114        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
    -115        TRANSFORMS.pop(exp.Pow)
    +103            exp.DateDiff: lambda self, e: self.func(
    +104                "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
    +105            ),
    +106            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
    +107            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
    +108            exp.FromBase: rename_func("STRTOL"),
    +109            exp.JSONExtract: _json_sql,
    +110            exp.JSONExtractScalar: _json_sql,
    +111            exp.SafeConcat: concat_to_dpipe_sql,
    +112            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +113            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
    +114            exp.TsOrDsToDate: lambda self, e: self.sql(e.this),
    +115        }
     116
    -117        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
    -118
    -119        def values_sql(self, expression: exp.Values) -> str:
    -120            """
    -121            Converts `VALUES...` expression into a series of unions.
    +117        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
    +118        TRANSFORMS.pop(exp.Pivot)
    +119
    +120        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
    +121        TRANSFORMS.pop(exp.Pow)
     122
    -123            Note: If you have a lot of unions then this will result in a large number of recursive statements to
    -124            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
    -125            very slow.
    -126            """
    -127
    -128            # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example
    -129            if not expression.find_ancestor(exp.From, exp.Join):
    -130                return super().values_sql(expression)
    -131
    -132            column_names = expression.alias and expression.args["alias"].columns
    +123        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
    +124
    +125        def values_sql(self, expression: exp.Values) -> str:
    +126            """
    +127            Converts `VALUES...` expression into a series of unions.
    +128
    +129            Note: If you have a lot of unions then this will result in a large number of recursive statements to
    +130            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
    +131            very slow.
    +132            """
     133
    -134            selects = []
    -135            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
    -136
    -137            for i, row in enumerate(rows):
    -138                if i == 0 and column_names:
    -139                    row = [
    -140                        exp.alias_(value, column_name)
    -141                        for value, column_name in zip(row, column_names)
    -142                    ]
    -143
    -144                selects.append(exp.Select(expressions=row))
    -145
    -146            subquery_expression: exp.Select | exp.Union = selects[0]
    -147            if len(selects) > 1:
    -148                for select in selects[1:]:
    -149                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
    -150
    -151            return self.subquery_sql(subquery_expression.subquery(expression.alias))
    -152
    -153        def with_properties(self, properties: exp.Properties) -> str:
    -154            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
    -155            return self.properties(properties, prefix=" ", suffix="")
    +134            # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example
    +135            if not expression.find_ancestor(exp.From, exp.Join):
    +136                return super().values_sql(expression)
    +137
    +138            column_names = expression.alias and expression.args["alias"].columns
    +139
    +140            selects = []
    +141            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
    +142
    +143            for i, row in enumerate(rows):
    +144                if i == 0 and column_names:
    +145                    row = [
    +146                        exp.alias_(value, column_name)
    +147                        for value, column_name in zip(row, column_names)
    +148                    ]
    +149
    +150                selects.append(exp.Select(expressions=row))
    +151
    +152            subquery_expression: exp.Select | exp.Union = selects[0]
    +153            if len(selects) > 1:
    +154                for select in selects[1:]:
    +155                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
     156
    -157        def datatype_sql(self, expression: exp.DataType) -> str:
    -158            """
    -159            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    -160            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    -161            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    -162            `TEXT` to `VARCHAR`.
    -163            """
    -164            if expression.is_type("text"):
    -165                expression = expression.copy()
    -166                expression.set("this", exp.DataType.Type.VARCHAR)
    -167                precision = expression.args.get("expressions")
    -168
    -169                if not precision:
    -170                    expression.append("expressions", exp.Var(this="MAX"))
    -171
    -172            return super().datatype_sql(expression)
    +157            return self.subquery_sql(subquery_expression.subquery(expression.alias))
    +158
    +159        def with_properties(self, properties: exp.Properties) -> str:
    +160            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
    +161            return self.properties(properties, prefix=" ", suffix="")
    +162
    +163        def datatype_sql(self, expression: exp.DataType) -> str:
    +164            """
    +165            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    +166            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    +167            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    +168            `TEXT` to `VARCHAR`.
    +169            """
    +170            if expression.is_type("text"):
    +171                expression = expression.copy()
    +172                expression.set("this", exp.DataType.Type.VARCHAR)
    +173                precision = expression.args.get("expressions")
    +174
    +175                if not precision:
    +176                    expression.append("expressions", exp.var("MAX"))
    +177
    +178            return super().datatype_sql(expression)
     
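    A small usage sketch for the transforms above (illustrative; column and table names are invented): concat expressions are expected to be rendered with the || operator via concat_to_dpipe_sql, and DateAdd becomes a DATEADD call when targeting Redshift.

        import sqlglot

        # CONCAT is rewritten to || and DATE_ADD to DATEADD in the Redshift output.
        sql = "SELECT CONCAT(first_name, last_name), DATE_ADD(created_at, INTERVAL 1 DAY) FROM users"
        print(sqlglot.transpile(sql, read="mysql", write="redshift")[0])
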
-Generator interprets the given syntax tree and produces a SQL string as an output.
+Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:

    -• time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
    -• time_trie (trie): a trie of the time_mapping keys
    -• pretty (bool): if set to True the returned string will be formatted. Default: False.
    -• quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    -• quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    -• identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    -• identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    -• bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    -• bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    -• hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    -• hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    -• byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    -• byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    -• raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    -• raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    -• identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    -• normalize (bool): if set to True all identifiers will lower cased
    -• string_escape (str): specifies a string escape character. Default: '.
    -• identifier_escape (str): specifies an identifier escape character. Default: ".
    -• pad (int): determines padding in a formatted string. Default: 2.
    -• indent (int): determines the size of indentation in a formatted string. Default: 4.
    -• unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    -• normalize_functions (str): normalize function names, "upper", "lower", or None. Default: "upper"
    -• alias_post_tablesample (bool): if the table alias comes after tablesample. Default: False
    -• identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit. Default: False
    -• unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
    -• null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
    -• max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    -• leading_comma (bool): if the the comma is leading or trailing in select statements. Default: False
    +• pretty: Whether or not to format the produced SQL string. Default: False.
    +• identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
    +• normalize: Whether or not to normalize identifiers to lowercase. Default: False.
    +• pad: Determines the pad size in a formatted string. Default: 2.
    +• indent: Determines the indentation size in a formatted string. Default: 2.
    +• normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
    +• unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
    +• max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    +• leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
• max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
    @@ -761,39 +755,39 @@ Default: True
    -
    119        def values_sql(self, expression: exp.Values) -> str:
    -120            """
    -121            Converts `VALUES...` expression into a series of unions.
    -122
    -123            Note: If you have a lot of unions then this will result in a large number of recursive statements to
    -124            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
    -125            very slow.
    -126            """
    -127
    -128            # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example
    -129            if not expression.find_ancestor(exp.From, exp.Join):
    -130                return super().values_sql(expression)
    -131
    -132            column_names = expression.alias and expression.args["alias"].columns
    +            
    125        def values_sql(self, expression: exp.Values) -> str:
    +126            """
    +127            Converts `VALUES...` expression into a series of unions.
    +128
    +129            Note: If you have a lot of unions then this will result in a large number of recursive statements to
    +130            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
    +131            very slow.
    +132            """
     133
    -134            selects = []
    -135            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
    -136
    -137            for i, row in enumerate(rows):
    -138                if i == 0 and column_names:
    -139                    row = [
    -140                        exp.alias_(value, column_name)
    -141                        for value, column_name in zip(row, column_names)
    -142                    ]
    -143
    -144                selects.append(exp.Select(expressions=row))
    -145
    -146            subquery_expression: exp.Select | exp.Union = selects[0]
    -147            if len(selects) > 1:
    -148                for select in selects[1:]:
    -149                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
    -150
    -151            return self.subquery_sql(subquery_expression.subquery(expression.alias))
    +134            # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example
    +135            if not expression.find_ancestor(exp.From, exp.Join):
    +136                return super().values_sql(expression)
    +137
    +138            column_names = expression.alias and expression.args["alias"].columns
    +139
    +140            selects = []
    +141            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
    +142
    +143            for i, row in enumerate(rows):
    +144                if i == 0 and column_names:
    +145                    row = [
    +146                        exp.alias_(value, column_name)
    +147                        for value, column_name in zip(row, column_names)
    +148                    ]
    +149
    +150                selects.append(exp.Select(expressions=row))
    +151
    +152            subquery_expression: exp.Select | exp.Union = selects[0]
    +153            if len(selects) > 1:
    +154                for select in selects[1:]:
    +155                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
    +156
    +157            return self.subquery_sql(subquery_expression.subquery(expression.alias))
     
    @@ -817,9 +811,9 @@ very slow.

    -
    153        def with_properties(self, properties: exp.Properties) -> str:
    -154            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
    -155            return self.properties(properties, prefix=" ", suffix="")
    +            
    159        def with_properties(self, properties: exp.Properties) -> str:
    +160            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
    +161            return self.properties(properties, prefix=" ", suffix="")
     
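    To connect the generator arguments above to actual usage, here is a sketch (not part of the patch; the query is made up). pretty, identify and normalize are generator options, so they can be passed straight through sqlglot.transpile.

        import sqlglot

        sql = "select ID, Name from Customers where ID > 10"
        # pretty, identify and normalize are forwarded to the Generator that renders the output string.
        print(sqlglot.transpile(sql, read="redshift", write="redshift", pretty=True, identify=True, normalize=True)[0])
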
    @@ -839,22 +833,22 @@ very slow.

    -
    157        def datatype_sql(self, expression: exp.DataType) -> str:
    -158            """
    -159            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    -160            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    -161            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    -162            `TEXT` to `VARCHAR`.
    -163            """
    -164            if expression.is_type("text"):
    -165                expression = expression.copy()
    -166                expression.set("this", exp.DataType.Type.VARCHAR)
    -167                precision = expression.args.get("expressions")
    -168
    -169                if not precision:
    -170                    expression.append("expressions", exp.Var(this="MAX"))
    -171
    -172            return super().datatype_sql(expression)
    +            
    163        def datatype_sql(self, expression: exp.DataType) -> str:
    +164            """
    +165            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    +166            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    +167            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    +168            `TEXT` to `VARCHAR`.
    +169            """
    +170            if expression.is_type("text"):
    +171                expression = expression.copy()
    +172                expression.set("this", exp.DataType.Type.VARCHAR)
    +173                precision = expression.args.get("expressions")
    +174
    +175                if not precision:
    +176                    expression.append("expressions", exp.var("MAX"))
    +177
    +178            return super().datatype_sql(expression)
     
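    A short sketch of the TEXT handling described in datatype_sql (illustrative; the table is made up): a bare TEXT column is widened to VARCHAR(MAX), while TEXT with an explicit precision keeps that precision as VARCHAR(n).

        import sqlglot

        # TEXT without a precision becomes VARCHAR(MAX) in the Redshift output.
        print(sqlglot.transpile("CREATE TABLE t (c TEXT)", read="postgres", write="redshift")[0])
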
    @@ -895,6 +889,7 @@ without precision we convert it to VARCHAR(max) and if it does have
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +
    createable_sql
    create_sql
    clone_sql
    describe_sql
    @@ -974,10 +969,12 @@ without precision we convert it to VARCHAR(max) and if it does have
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_having_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -1002,7 +999,7 @@ without precision we convert it to VARCHAR(max) and if it does have
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    +
    safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -1053,6 +1050,7 @@ without precision we convert it to VARCHAR(max) and if it does have
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -1101,6 +1099,7 @@ without precision we convert it to VARCHAR(max) and if it does have
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
    diff --git a/docs/sqlglot/dialects/snowflake.html b/docs/sqlglot/dialects/snowflake.html index e8aeffe..323b478 100644 --- a/docs/sqlglot/dialects/snowflake.html +++ b/docs/sqlglot/dialects/snowflake.html @@ -261,10 +261,10 @@ 167 168 169class Snowflake(Dialect): -170 null_ordering = "nulls_are_large" -171 time_format = "'yyyy-mm-dd hh24:mi:ss'" +170 NULL_ORDERING = "nulls_are_large" +171 TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'" 172 -173 time_mapping = { +173 TIME_MAPPING = { 174 "YYYY": "%Y", 175 "yyyy": "%Y", 176 "YY": "%y", @@ -304,196 +304,191 @@ 210 "CONVERT_TIMEZONE": _parse_convert_timezone, 211 "DATE_TRUNC": date_trunc_to_time, 212 "DATEADD": lambda args: exp.DateAdd( -213 this=seq_get(args, 2), -214 expression=seq_get(args, 1), -215 unit=seq_get(args, 0), -216 ), -217 "DATEDIFF": lambda args: exp.DateDiff( -218 this=seq_get(args, 2), -219 expression=seq_get(args, 1), -220 unit=seq_get(args, 0), -221 ), -222 "DIV0": _div0_to_if, -223 "IFF": exp.If.from_arg_list, -224 "NULLIFZERO": _nullifzero_to_if, -225 "OBJECT_CONSTRUCT": _parse_object_construct, -226 "RLIKE": exp.RegexpLike.from_arg_list, -227 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), -228 "TO_ARRAY": exp.Array.from_arg_list, -229 "TO_VARCHAR": exp.ToChar.from_arg_list, -230 "TO_TIMESTAMP": _snowflake_to_timestamp, -231 "ZEROIFNULL": _zeroifnull_to_if, -232 } -233 -234 FUNCTION_PARSERS = { -235 **parser.Parser.FUNCTION_PARSERS, -236 "DATE_PART": _parse_date_part, -237 } -238 FUNCTION_PARSERS.pop("TRIM") -239 -240 FUNC_TOKENS = { -241 *parser.Parser.FUNC_TOKENS, -242 TokenType.RLIKE, -243 TokenType.TABLE, -244 } -245 -246 COLUMN_OPERATORS = { -247 **parser.Parser.COLUMN_OPERATORS, -248 TokenType.COLON: lambda self, this, path: self.expression( -249 exp.Bracket, -250 this=this, -251 expressions=[path], -252 ), -253 } -254 -255 TIMESTAMPS = parser.Parser.TIMESTAMPS.copy() - {TokenType.TIME} +213 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) +214 ), +215 "DATEDIFF": lambda args: exp.DateDiff( +216 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) +217 ), +218 "DIV0": _div0_to_if, +219 "IFF": exp.If.from_arg_list, +220 "NULLIFZERO": _nullifzero_to_if, +221 "OBJECT_CONSTRUCT": _parse_object_construct, +222 "RLIKE": exp.RegexpLike.from_arg_list, +223 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), +224 "TO_ARRAY": exp.Array.from_arg_list, +225 "TO_VARCHAR": exp.ToChar.from_arg_list, +226 "TO_TIMESTAMP": _snowflake_to_timestamp, +227 "ZEROIFNULL": _zeroifnull_to_if, +228 } +229 +230 FUNCTION_PARSERS = { +231 **parser.Parser.FUNCTION_PARSERS, +232 "DATE_PART": _parse_date_part, +233 } +234 FUNCTION_PARSERS.pop("TRIM") +235 +236 FUNC_TOKENS = { +237 *parser.Parser.FUNC_TOKENS, +238 TokenType.RLIKE, +239 TokenType.TABLE, +240 } +241 +242 COLUMN_OPERATORS = { +243 **parser.Parser.COLUMN_OPERATORS, +244 TokenType.COLON: lambda self, this, path: self.expression( +245 exp.Bracket, this=this, expressions=[path] +246 ), +247 } +248 +249 TIMESTAMPS = parser.Parser.TIMESTAMPS.copy() - {TokenType.TIME} +250 +251 RANGE_PARSERS = { +252 **parser.Parser.RANGE_PARSERS, +253 TokenType.LIKE_ANY: binary_range_parser(exp.LikeAny), +254 TokenType.ILIKE_ANY: binary_range_parser(exp.ILikeAny), +255 } 256 -257 RANGE_PARSERS = { -258 **parser.Parser.RANGE_PARSERS, -259 TokenType.LIKE_ANY: binary_range_parser(exp.LikeAny), -260 TokenType.ILIKE_ANY: binary_range_parser(exp.ILikeAny), +257 ALTER_PARSERS = { +258 
**parser.Parser.ALTER_PARSERS, +259 "UNSET": lambda self: self._parse_alter_table_set_tag(unset=True), +260 "SET": lambda self: self._parse_alter_table_set_tag(), 261 } 262 -263 ALTER_PARSERS = { -264 **parser.Parser.ALTER_PARSERS, -265 "UNSET": lambda self: self._parse_alter_table_set_tag(unset=True), -266 "SET": lambda self: self._parse_alter_table_set_tag(), -267 } -268 -269 def _parse_alter_table_set_tag(self, unset: bool = False) -> exp.Expression: -270 self._match_text_seq("TAG") -271 parser = t.cast(t.Callable, self._parse_id_var if unset else self._parse_conjunction) -272 return self.expression(exp.SetTag, expressions=self._parse_csv(parser), unset=unset) +263 def _parse_alter_table_set_tag(self, unset: bool = False) -> exp.Expression: +264 self._match_text_seq("TAG") +265 parser = t.cast(t.Callable, self._parse_id_var if unset else self._parse_conjunction) +266 return self.expression(exp.SetTag, expressions=self._parse_csv(parser), unset=unset) +267 +268 class Tokenizer(tokens.Tokenizer): +269 QUOTES = ["'", "$$"] +270 STRING_ESCAPES = ["\\", "'"] +271 HEX_STRINGS = [("x'", "'"), ("X'", "'")] +272 COMMENTS = ["--", "//", ("/*", "*/")] 273 -274 class Tokenizer(tokens.Tokenizer): -275 QUOTES = ["'", "$$"] -276 STRING_ESCAPES = ["\\", "'"] -277 HEX_STRINGS = [("x'", "'"), ("X'", "'")] -278 -279 KEYWORDS = { -280 **tokens.Tokenizer.KEYWORDS, -281 "CHAR VARYING": TokenType.VARCHAR, -282 "CHARACTER VARYING": TokenType.VARCHAR, -283 "EXCLUDE": TokenType.EXCEPT, -284 "ILIKE ANY": TokenType.ILIKE_ANY, -285 "LIKE ANY": TokenType.LIKE_ANY, -286 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, -287 "MINUS": TokenType.EXCEPT, -288 "NCHAR VARYING": TokenType.VARCHAR, -289 "PUT": TokenType.COMMAND, -290 "RENAME": TokenType.REPLACE, -291 "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ, -292 "TIMESTAMP_NTZ": TokenType.TIMESTAMP, -293 "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ, -294 "TIMESTAMPNTZ": TokenType.TIMESTAMP, -295 "SAMPLE": TokenType.TABLE_SAMPLE, +274 KEYWORDS = { +275 **tokens.Tokenizer.KEYWORDS, +276 "CHAR VARYING": TokenType.VARCHAR, +277 "CHARACTER VARYING": TokenType.VARCHAR, +278 "EXCLUDE": TokenType.EXCEPT, +279 "ILIKE ANY": TokenType.ILIKE_ANY, +280 "LIKE ANY": TokenType.LIKE_ANY, +281 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, +282 "MINUS": TokenType.EXCEPT, +283 "NCHAR VARYING": TokenType.VARCHAR, +284 "PUT": TokenType.COMMAND, +285 "RENAME": TokenType.REPLACE, +286 "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ, +287 "TIMESTAMP_NTZ": TokenType.TIMESTAMP, +288 "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ, +289 "TIMESTAMPNTZ": TokenType.TIMESTAMP, +290 "SAMPLE": TokenType.TABLE_SAMPLE, +291 } +292 +293 SINGLE_TOKENS = { +294 **tokens.Tokenizer.SINGLE_TOKENS, +295 "$": TokenType.PARAMETER, 296 } 297 -298 SINGLE_TOKENS = { -299 **tokens.Tokenizer.SINGLE_TOKENS, -300 "$": TokenType.PARAMETER, -301 } -302 -303 VAR_SINGLE_TOKENS = {"$"} -304 -305 class Generator(generator.Generator): -306 PARAMETER_TOKEN = "$" -307 MATCHED_BY_SOURCE = False -308 SINGLE_STRING_INTERVAL = True -309 JOIN_HINTS = False -310 TABLE_HINTS = False -311 -312 TRANSFORMS = { -313 **generator.Generator.TRANSFORMS, -314 exp.Array: inline_array_sql, -315 exp.ArrayConcat: rename_func("ARRAY_CAT"), -316 exp.ArrayJoin: rename_func("ARRAY_TO_STRING"), -317 exp.AtTimeZone: lambda self, e: self.func( -318 "CONVERT_TIMEZONE", e.args.get("zone"), e.this -319 ), -320 exp.DateAdd: lambda self, e: self.func("DATEADD", e.text("unit"), e.expression, e.this), -321 exp.DateDiff: lambda self, e: self.func( -322 "DATEDIFF", e.text("unit"), e.expression, 
e.this -323 ), -324 exp.DateStrToDate: datestrtodate_sql, -325 exp.DataType: _datatype_sql, -326 exp.DayOfWeek: rename_func("DAYOFWEEK"), -327 exp.Extract: rename_func("DATE_PART"), -328 exp.If: rename_func("IFF"), -329 exp.LogicalAnd: rename_func("BOOLAND_AGG"), -330 exp.LogicalOr: rename_func("BOOLOR_AGG"), -331 exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"), -332 exp.Max: max_or_greatest, -333 exp.Min: min_or_least, -334 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", -335 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), -336 exp.StarMap: rename_func("OBJECT_CONSTRUCT"), -337 exp.StrPosition: lambda self, e: self.func( -338 "POSITION", e.args.get("substr"), e.this, e.args.get("position") +298 VAR_SINGLE_TOKENS = {"$"} +299 +300 class Generator(generator.Generator): +301 PARAMETER_TOKEN = "$" +302 MATCHED_BY_SOURCE = False +303 SINGLE_STRING_INTERVAL = True +304 JOIN_HINTS = False +305 TABLE_HINTS = False +306 +307 TRANSFORMS = { +308 **generator.Generator.TRANSFORMS, +309 exp.Array: inline_array_sql, +310 exp.ArrayConcat: rename_func("ARRAY_CAT"), +311 exp.ArrayJoin: rename_func("ARRAY_TO_STRING"), +312 exp.AtTimeZone: lambda self, e: self.func( +313 "CONVERT_TIMEZONE", e.args.get("zone"), e.this +314 ), +315 exp.DateAdd: lambda self, e: self.func("DATEADD", e.text("unit"), e.expression, e.this), +316 exp.DateDiff: lambda self, e: self.func( +317 "DATEDIFF", e.text("unit"), e.expression, e.this +318 ), +319 exp.DateStrToDate: datestrtodate_sql, +320 exp.DataType: _datatype_sql, +321 exp.DayOfWeek: rename_func("DAYOFWEEK"), +322 exp.Extract: rename_func("DATE_PART"), +323 exp.If: rename_func("IFF"), +324 exp.LogicalAnd: rename_func("BOOLAND_AGG"), +325 exp.LogicalOr: rename_func("BOOLOR_AGG"), +326 exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"), +327 exp.Max: max_or_greatest, +328 exp.Min: min_or_least, +329 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", +330 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), +331 exp.StarMap: rename_func("OBJECT_CONSTRUCT"), +332 exp.StrPosition: lambda self, e: self.func( +333 "POSITION", e.args.get("substr"), e.this, e.args.get("position") +334 ), +335 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", +336 exp.Struct: lambda self, e: self.func( +337 "OBJECT_CONSTRUCT", +338 *(arg for expression in e.expressions for arg in expression.flatten()), 339 ), -340 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", -341 exp.Struct: lambda self, e: self.func( -342 "OBJECT_CONSTRUCT", -343 *(arg for expression in e.expressions for arg in expression.flatten()), +340 exp.TimeStrToTime: timestrtotime_sql, +341 exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})", +342 exp.TimeToStr: lambda self, e: self.func( +343 "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e) 344 ), -345 exp.TimeStrToTime: timestrtotime_sql, -346 exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})", -347 exp.TimeToStr: lambda self, e: self.func( -348 "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e) -349 ), -350 exp.TimestampTrunc: timestamptrunc_sql, -351 exp.ToChar: lambda self, e: self.function_fallback_sql(e), -352 exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression), -353 exp.TsOrDsToDate: ts_or_ds_to_date_sql("snowflake"), -354 exp.UnixToTime: 
_unix_to_time_sql, -355 exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"), +345 exp.TimestampTrunc: timestamptrunc_sql, +346 exp.ToChar: lambda self, e: self.function_fallback_sql(e), +347 exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression), +348 exp.TsOrDsToDate: ts_or_ds_to_date_sql("snowflake"), +349 exp.UnixToTime: _unix_to_time_sql, +350 exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"), +351 } +352 +353 TYPE_MAPPING = { +354 **generator.Generator.TYPE_MAPPING, +355 exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ", 356 } 357 -358 TYPE_MAPPING = { -359 **generator.Generator.TYPE_MAPPING, -360 exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ", +358 STAR_MAPPING = { +359 "except": "EXCLUDE", +360 "replace": "RENAME", 361 } 362 -363 STAR_MAPPING = { -364 "except": "EXCLUDE", -365 "replace": "RENAME", -366 } -367 -368 PROPERTIES_LOCATION = { -369 **generator.Generator.PROPERTIES_LOCATION, -370 exp.SetProperty: exp.Properties.Location.UNSUPPORTED, -371 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -372 } +363 PROPERTIES_LOCATION = { +364 **generator.Generator.PROPERTIES_LOCATION, +365 exp.SetProperty: exp.Properties.Location.UNSUPPORTED, +366 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +367 } +368 +369 def except_op(self, expression: exp.Except) -> str: +370 if not expression.args.get("distinct", False): +371 self.unsupported("EXCEPT with All is not supported in Snowflake") +372 return super().except_op(expression) 373 -374 def except_op(self, expression: exp.Except) -> str: +374 def intersect_op(self, expression: exp.Intersect) -> str: 375 if not expression.args.get("distinct", False): -376 self.unsupported("EXCEPT with All is not supported in Snowflake") -377 return super().except_op(expression) +376 self.unsupported("INTERSECT with All is not supported in Snowflake") +377 return super().intersect_op(expression) 378 -379 def intersect_op(self, expression: exp.Intersect) -> str: -380 if not expression.args.get("distinct", False): -381 self.unsupported("INTERSECT with All is not supported in Snowflake") -382 return super().intersect_op(expression) -383 -384 def settag_sql(self, expression: exp.SetTag) -> str: -385 action = "UNSET" if expression.args.get("unset") else "SET" -386 return f"{action} TAG {self.expressions(expression)}" -387 -388 def describe_sql(self, expression: exp.Describe) -> str: -389 # Default to table if kind is unknown -390 kind_value = expression.args.get("kind") or "TABLE" -391 kind = f" {kind_value}" if kind_value else "" -392 this = f" {self.sql(expression, 'this')}" -393 return f"DESCRIBE{kind}{this}" -394 -395 def generatedasidentitycolumnconstraint_sql( -396 self, expression: exp.GeneratedAsIdentityColumnConstraint -397 ) -> str: -398 start = expression.args.get("start") -399 start = f" START {start}" if start else "" -400 increment = expression.args.get("increment") -401 increment = f" INCREMENT {increment}" if increment else "" -402 return f"AUTOINCREMENT{start}{increment}" +379 def settag_sql(self, expression: exp.SetTag) -> str: +380 action = "UNSET" if expression.args.get("unset") else "SET" +381 return f"{action} TAG {self.expressions(expression)}" +382 +383 def describe_sql(self, expression: exp.Describe) -> str: +384 # Default to table if kind is unknown +385 kind_value = expression.args.get("kind") or "TABLE" +386 kind = f" {kind_value}" if kind_value else "" +387 this = f" {self.sql(expression, 'this')}" +388 return f"DESCRIBE{kind}{this}" +389 +390 def 
generatedasidentitycolumnconstraint_sql( +391 self, expression: exp.GeneratedAsIdentityColumnConstraint +392 ) -> str: +393 start = expression.args.get("start") +394 start = f" START {start}" if start else "" +395 increment = expression.args.get("increment") +396 increment = f" INCREMENT {increment}" if increment else "" +397 return f"AUTOINCREMENT{start}{increment}"
    @@ -510,10 +505,10 @@
    170class Snowflake(Dialect):
    -171    null_ordering = "nulls_are_large"
    -172    time_format = "'yyyy-mm-dd hh24:mi:ss'"
    +171    NULL_ORDERING = "nulls_are_large"
    +172    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
     173
    -174    time_mapping = {
    +174    TIME_MAPPING = {
     175        "YYYY": "%Y",
     176        "yyyy": "%Y",
     177        "YY": "%y",
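
    As a rough illustration of the TIME_FORMAT/TIME_MAPPING settings above (a sketch, not part of the patch; the column name is invented), Snowflake-style format tokens are translated into python-style formats when converting to another dialect:

        import sqlglot

        # The 'yyyy-mm-dd hh24:mi:ss' tokens are mapped through TIME_MAPPING before the
        # target dialect renders its own time-format syntax.
        print(sqlglot.transpile("SELECT TO_TIMESTAMP(col, 'yyyy-mm-dd hh24:mi:ss')", read="snowflake", write="duckdb")[0])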
    @@ -553,196 +548,191 @@
     211            "CONVERT_TIMEZONE": _parse_convert_timezone,
     212            "DATE_TRUNC": date_trunc_to_time,
     213            "DATEADD": lambda args: exp.DateAdd(
    -214                this=seq_get(args, 2),
    -215                expression=seq_get(args, 1),
    -216                unit=seq_get(args, 0),
    -217            ),
    -218            "DATEDIFF": lambda args: exp.DateDiff(
    -219                this=seq_get(args, 2),
    -220                expression=seq_get(args, 1),
    -221                unit=seq_get(args, 0),
    -222            ),
    -223            "DIV0": _div0_to_if,
    -224            "IFF": exp.If.from_arg_list,
    -225            "NULLIFZERO": _nullifzero_to_if,
    -226            "OBJECT_CONSTRUCT": _parse_object_construct,
    -227            "RLIKE": exp.RegexpLike.from_arg_list,
    -228            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    -229            "TO_ARRAY": exp.Array.from_arg_list,
    -230            "TO_VARCHAR": exp.ToChar.from_arg_list,
    -231            "TO_TIMESTAMP": _snowflake_to_timestamp,
    -232            "ZEROIFNULL": _zeroifnull_to_if,
    -233        }
    -234
    -235        FUNCTION_PARSERS = {
    -236            **parser.Parser.FUNCTION_PARSERS,
    -237            "DATE_PART": _parse_date_part,
    -238        }
    -239        FUNCTION_PARSERS.pop("TRIM")
    -240
    -241        FUNC_TOKENS = {
    -242            *parser.Parser.FUNC_TOKENS,
    -243            TokenType.RLIKE,
    -244            TokenType.TABLE,
    -245        }
    -246
    -247        COLUMN_OPERATORS = {
    -248            **parser.Parser.COLUMN_OPERATORS,
    -249            TokenType.COLON: lambda self, this, path: self.expression(
    -250                exp.Bracket,
    -251                this=this,
    -252                expressions=[path],
    -253            ),
    -254        }
    -255
    -256        TIMESTAMPS = parser.Parser.TIMESTAMPS.copy() - {TokenType.TIME}
    +214                this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
    +215            ),
    +216            "DATEDIFF": lambda args: exp.DateDiff(
    +217                this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
    +218            ),
    +219            "DIV0": _div0_to_if,
    +220            "IFF": exp.If.from_arg_list,
    +221            "NULLIFZERO": _nullifzero_to_if,
    +222            "OBJECT_CONSTRUCT": _parse_object_construct,
    +223            "RLIKE": exp.RegexpLike.from_arg_list,
    +224            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    +225            "TO_ARRAY": exp.Array.from_arg_list,
    +226            "TO_VARCHAR": exp.ToChar.from_arg_list,
    +227            "TO_TIMESTAMP": _snowflake_to_timestamp,
    +228            "ZEROIFNULL": _zeroifnull_to_if,
    +229        }
    +230
    +231        FUNCTION_PARSERS = {
    +232            **parser.Parser.FUNCTION_PARSERS,
    +233            "DATE_PART": _parse_date_part,
    +234        }
    +235        FUNCTION_PARSERS.pop("TRIM")
    +236
    +237        FUNC_TOKENS = {
    +238            *parser.Parser.FUNC_TOKENS,
    +239            TokenType.RLIKE,
    +240            TokenType.TABLE,
    +241        }
    +242
    +243        COLUMN_OPERATORS = {
    +244            **parser.Parser.COLUMN_OPERATORS,
    +245            TokenType.COLON: lambda self, this, path: self.expression(
    +246                exp.Bracket, this=this, expressions=[path]
    +247            ),
    +248        }
    +249
    +250        TIMESTAMPS = parser.Parser.TIMESTAMPS.copy() - {TokenType.TIME}
    +251
    +252        RANGE_PARSERS = {
    +253            **parser.Parser.RANGE_PARSERS,
    +254            TokenType.LIKE_ANY: binary_range_parser(exp.LikeAny),
    +255            TokenType.ILIKE_ANY: binary_range_parser(exp.ILikeAny),
    +256        }
     257
    -258        RANGE_PARSERS = {
    -259            **parser.Parser.RANGE_PARSERS,
    -260            TokenType.LIKE_ANY: binary_range_parser(exp.LikeAny),
    -261            TokenType.ILIKE_ANY: binary_range_parser(exp.ILikeAny),
    +258        ALTER_PARSERS = {
    +259            **parser.Parser.ALTER_PARSERS,
    +260            "UNSET": lambda self: self._parse_alter_table_set_tag(unset=True),
    +261            "SET": lambda self: self._parse_alter_table_set_tag(),
     262        }
     263
    -264        ALTER_PARSERS = {
    -265            **parser.Parser.ALTER_PARSERS,
    -266            "UNSET": lambda self: self._parse_alter_table_set_tag(unset=True),
    -267            "SET": lambda self: self._parse_alter_table_set_tag(),
    -268        }
    -269
    -270        def _parse_alter_table_set_tag(self, unset: bool = False) -> exp.Expression:
    -271            self._match_text_seq("TAG")
    -272            parser = t.cast(t.Callable, self._parse_id_var if unset else self._parse_conjunction)
    -273            return self.expression(exp.SetTag, expressions=self._parse_csv(parser), unset=unset)
    +264        def _parse_alter_table_set_tag(self, unset: bool = False) -> exp.Expression:
    +265            self._match_text_seq("TAG")
    +266            parser = t.cast(t.Callable, self._parse_id_var if unset else self._parse_conjunction)
    +267            return self.expression(exp.SetTag, expressions=self._parse_csv(parser), unset=unset)
    +268
    +269    class Tokenizer(tokens.Tokenizer):
    +270        QUOTES = ["'", "$$"]
    +271        STRING_ESCAPES = ["\\", "'"]
    +272        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    +273        COMMENTS = ["--", "//", ("/*", "*/")]
     274
    -275    class Tokenizer(tokens.Tokenizer):
    -276        QUOTES = ["'", "$$"]
    -277        STRING_ESCAPES = ["\\", "'"]
    -278        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    -279
    -280        KEYWORDS = {
    -281            **tokens.Tokenizer.KEYWORDS,
    -282            "CHAR VARYING": TokenType.VARCHAR,
    -283            "CHARACTER VARYING": TokenType.VARCHAR,
    -284            "EXCLUDE": TokenType.EXCEPT,
    -285            "ILIKE ANY": TokenType.ILIKE_ANY,
    -286            "LIKE ANY": TokenType.LIKE_ANY,
    -287            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    -288            "MINUS": TokenType.EXCEPT,
    -289            "NCHAR VARYING": TokenType.VARCHAR,
    -290            "PUT": TokenType.COMMAND,
    -291            "RENAME": TokenType.REPLACE,
    -292            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
    -293            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
    -294            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
    -295            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
    -296            "SAMPLE": TokenType.TABLE_SAMPLE,
    +275        KEYWORDS = {
    +276            **tokens.Tokenizer.KEYWORDS,
    +277            "CHAR VARYING": TokenType.VARCHAR,
    +278            "CHARACTER VARYING": TokenType.VARCHAR,
    +279            "EXCLUDE": TokenType.EXCEPT,
    +280            "ILIKE ANY": TokenType.ILIKE_ANY,
    +281            "LIKE ANY": TokenType.LIKE_ANY,
    +282            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    +283            "MINUS": TokenType.EXCEPT,
    +284            "NCHAR VARYING": TokenType.VARCHAR,
    +285            "PUT": TokenType.COMMAND,
    +286            "RENAME": TokenType.REPLACE,
    +287            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
    +288            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
    +289            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
    +290            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
    +291            "SAMPLE": TokenType.TABLE_SAMPLE,
    +292        }
    +293
    +294        SINGLE_TOKENS = {
    +295            **tokens.Tokenizer.SINGLE_TOKENS,
    +296            "$": TokenType.PARAMETER,
     297        }
     298
    -299        SINGLE_TOKENS = {
    -300            **tokens.Tokenizer.SINGLE_TOKENS,
    -301            "$": TokenType.PARAMETER,
    -302        }
    -303
    -304        VAR_SINGLE_TOKENS = {"$"}
    -305
    -306    class Generator(generator.Generator):
    -307        PARAMETER_TOKEN = "$"
    -308        MATCHED_BY_SOURCE = False
    -309        SINGLE_STRING_INTERVAL = True
    -310        JOIN_HINTS = False
    -311        TABLE_HINTS = False
    -312
    -313        TRANSFORMS = {
    -314            **generator.Generator.TRANSFORMS,
    -315            exp.Array: inline_array_sql,
    -316            exp.ArrayConcat: rename_func("ARRAY_CAT"),
    -317            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
    -318            exp.AtTimeZone: lambda self, e: self.func(
    -319                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
    -320            ),
    -321            exp.DateAdd: lambda self, e: self.func("DATEADD", e.text("unit"), e.expression, e.this),
    -322            exp.DateDiff: lambda self, e: self.func(
    -323                "DATEDIFF", e.text("unit"), e.expression, e.this
    -324            ),
    -325            exp.DateStrToDate: datestrtodate_sql,
    -326            exp.DataType: _datatype_sql,
    -327            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    -328            exp.Extract: rename_func("DATE_PART"),
    -329            exp.If: rename_func("IFF"),
    -330            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
    -331            exp.LogicalOr: rename_func("BOOLOR_AGG"),
    -332            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
    -333            exp.Max: max_or_greatest,
    -334            exp.Min: min_or_least,
    -335            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    -336            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -337            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
    -338            exp.StrPosition: lambda self, e: self.func(
    -339                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
    +299        VAR_SINGLE_TOKENS = {"$"}
    +300
    +301    class Generator(generator.Generator):
    +302        PARAMETER_TOKEN = "$"
    +303        MATCHED_BY_SOURCE = False
    +304        SINGLE_STRING_INTERVAL = True
    +305        JOIN_HINTS = False
    +306        TABLE_HINTS = False
    +307
    +308        TRANSFORMS = {
    +309            **generator.Generator.TRANSFORMS,
    +310            exp.Array: inline_array_sql,
    +311            exp.ArrayConcat: rename_func("ARRAY_CAT"),
    +312            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
    +313            exp.AtTimeZone: lambda self, e: self.func(
    +314                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
    +315            ),
    +316            exp.DateAdd: lambda self, e: self.func("DATEADD", e.text("unit"), e.expression, e.this),
    +317            exp.DateDiff: lambda self, e: self.func(
    +318                "DATEDIFF", e.text("unit"), e.expression, e.this
    +319            ),
    +320            exp.DateStrToDate: datestrtodate_sql,
    +321            exp.DataType: _datatype_sql,
    +322            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    +323            exp.Extract: rename_func("DATE_PART"),
    +324            exp.If: rename_func("IFF"),
    +325            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
    +326            exp.LogicalOr: rename_func("BOOLOR_AGG"),
    +327            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
    +328            exp.Max: max_or_greatest,
    +329            exp.Min: min_or_least,
    +330            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    +331            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +332            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
    +333            exp.StrPosition: lambda self, e: self.func(
    +334                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
    +335            ),
    +336            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    +337            exp.Struct: lambda self, e: self.func(
    +338                "OBJECT_CONSTRUCT",
    +339                *(arg for expression in e.expressions for arg in expression.flatten()),
     340            ),
    -341            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    -342            exp.Struct: lambda self, e: self.func(
    -343                "OBJECT_CONSTRUCT",
    -344                *(arg for expression in e.expressions for arg in expression.flatten()),
    +341            exp.TimeStrToTime: timestrtotime_sql,
    +342            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
    +343            exp.TimeToStr: lambda self, e: self.func(
    +344                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
     345            ),
    -346            exp.TimeStrToTime: timestrtotime_sql,
    -347            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
    -348            exp.TimeToStr: lambda self, e: self.func(
    -349                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
    -350            ),
    -351            exp.TimestampTrunc: timestamptrunc_sql,
    -352            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -353            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
    -354            exp.TsOrDsToDate: ts_or_ds_to_date_sql("snowflake"),
    -355            exp.UnixToTime: _unix_to_time_sql,
    -356            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
    +346            exp.TimestampTrunc: timestamptrunc_sql,
    +347            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +348            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
    +349            exp.TsOrDsToDate: ts_or_ds_to_date_sql("snowflake"),
    +350            exp.UnixToTime: _unix_to_time_sql,
    +351            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
    +352        }
    +353
    +354        TYPE_MAPPING = {
    +355            **generator.Generator.TYPE_MAPPING,
    +356            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
     357        }
     358
    -359        TYPE_MAPPING = {
    -360            **generator.Generator.TYPE_MAPPING,
    -361            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
    +359        STAR_MAPPING = {
    +360            "except": "EXCLUDE",
    +361            "replace": "RENAME",
     362        }
     363
    -364        STAR_MAPPING = {
    -365            "except": "EXCLUDE",
    -366            "replace": "RENAME",
    -367        }
    -368
    -369        PROPERTIES_LOCATION = {
    -370            **generator.Generator.PROPERTIES_LOCATION,
    -371            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
    -372            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -373        }
    +364        PROPERTIES_LOCATION = {
    +365            **generator.Generator.PROPERTIES_LOCATION,
    +366            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
    +367            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +368        }
    +369
    +370        def except_op(self, expression: exp.Except) -> str:
    +371            if not expression.args.get("distinct", False):
    +372                self.unsupported("EXCEPT with All is not supported in Snowflake")
    +373            return super().except_op(expression)
     374
    -375        def except_op(self, expression: exp.Except) -> str:
    +375        def intersect_op(self, expression: exp.Intersect) -> str:
     376            if not expression.args.get("distinct", False):
    -377                self.unsupported("EXCEPT with All is not supported in Snowflake")
    -378            return super().except_op(expression)
    +377                self.unsupported("INTERSECT with All is not supported in Snowflake")
    +378            return super().intersect_op(expression)
     379
    -380        def intersect_op(self, expression: exp.Intersect) -> str:
    -381            if not expression.args.get("distinct", False):
    -382                self.unsupported("INTERSECT with All is not supported in Snowflake")
    -383            return super().intersect_op(expression)
    -384
    -385        def settag_sql(self, expression: exp.SetTag) -> str:
    -386            action = "UNSET" if expression.args.get("unset") else "SET"
    -387            return f"{action} TAG {self.expressions(expression)}"
    -388
    -389        def describe_sql(self, expression: exp.Describe) -> str:
    -390            # Default to table if kind is unknown
    -391            kind_value = expression.args.get("kind") or "TABLE"
    -392            kind = f" {kind_value}" if kind_value else ""
    -393            this = f" {self.sql(expression, 'this')}"
    -394            return f"DESCRIBE{kind}{this}"
    -395
    -396        def generatedasidentitycolumnconstraint_sql(
    -397            self, expression: exp.GeneratedAsIdentityColumnConstraint
    -398        ) -> str:
    -399            start = expression.args.get("start")
    -400            start = f" START {start}" if start else ""
    -401            increment = expression.args.get("increment")
    -402            increment = f" INCREMENT {increment}" if increment else ""
    -403            return f"AUTOINCREMENT{start}{increment}"
    +380        def settag_sql(self, expression: exp.SetTag) -> str:
    +381            action = "UNSET" if expression.args.get("unset") else "SET"
    +382            return f"{action} TAG {self.expressions(expression)}"
    +383
    +384        def describe_sql(self, expression: exp.Describe) -> str:
    +385            # Default to table if kind is unknown
    +386            kind_value = expression.args.get("kind") or "TABLE"
    +387            kind = f" {kind_value}" if kind_value else ""
    +388            this = f" {self.sql(expression, 'this')}"
    +389            return f"DESCRIBE{kind}{this}"
    +390
    +391        def generatedasidentitycolumnconstraint_sql(
    +392            self, expression: exp.GeneratedAsIdentityColumnConstraint
    +393        ) -> str:
    +394            start = expression.args.get("start")
    +395            start = f" START {start}" if start else ""
    +396            increment = expression.args.get("increment")
    +397            increment = f" INCREMENT {increment}" if increment else ""
    +398            return f"AUTOINCREMENT{start}{increment}"
     
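    A brief sketch of the generator mappings above in action (illustrative; names are invented): exp.If is renamed to IFF and the TIMESTAMP type maps to TIMESTAMPNTZ when writing Snowflake, while EXCEPT ALL / INTERSECT ALL trigger the unsupported warnings defined in except_op and intersect_op.

        import sqlglot

        # IF(...) is rendered as IFF(...) and TIMESTAMP as TIMESTAMPNTZ for Snowflake.
        sql = "SELECT IF(x > 0, 'pos', 'neg'), CAST(d AS TIMESTAMP) FROM t"
        print(sqlglot.transpile(sql, write="snowflake")[0])
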
    @@ -788,90 +778,76 @@
    211 "CONVERT_TIMEZONE": _parse_convert_timezone, 212 "DATE_TRUNC": date_trunc_to_time, 213 "DATEADD": lambda args: exp.DateAdd( -214 this=seq_get(args, 2), -215 expression=seq_get(args, 1), -216 unit=seq_get(args, 0), -217 ), -218 "DATEDIFF": lambda args: exp.DateDiff( -219 this=seq_get(args, 2), -220 expression=seq_get(args, 1), -221 unit=seq_get(args, 0), -222 ), -223 "DIV0": _div0_to_if, -224 "IFF": exp.If.from_arg_list, -225 "NULLIFZERO": _nullifzero_to_if, -226 "OBJECT_CONSTRUCT": _parse_object_construct, -227 "RLIKE": exp.RegexpLike.from_arg_list, -228 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), -229 "TO_ARRAY": exp.Array.from_arg_list, -230 "TO_VARCHAR": exp.ToChar.from_arg_list, -231 "TO_TIMESTAMP": _snowflake_to_timestamp, -232 "ZEROIFNULL": _zeroifnull_to_if, -233 } -234 -235 FUNCTION_PARSERS = { -236 **parser.Parser.FUNCTION_PARSERS, -237 "DATE_PART": _parse_date_part, -238 } -239 FUNCTION_PARSERS.pop("TRIM") -240 -241 FUNC_TOKENS = { -242 *parser.Parser.FUNC_TOKENS, -243 TokenType.RLIKE, -244 TokenType.TABLE, -245 } -246 -247 COLUMN_OPERATORS = { -248 **parser.Parser.COLUMN_OPERATORS, -249 TokenType.COLON: lambda self, this, path: self.expression( -250 exp.Bracket, -251 this=this, -252 expressions=[path], -253 ), -254 } -255 -256 TIMESTAMPS = parser.Parser.TIMESTAMPS.copy() - {TokenType.TIME} +214 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) +215 ), +216 "DATEDIFF": lambda args: exp.DateDiff( +217 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) +218 ), +219 "DIV0": _div0_to_if, +220 "IFF": exp.If.from_arg_list, +221 "NULLIFZERO": _nullifzero_to_if, +222 "OBJECT_CONSTRUCT": _parse_object_construct, +223 "RLIKE": exp.RegexpLike.from_arg_list, +224 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), +225 "TO_ARRAY": exp.Array.from_arg_list, +226 "TO_VARCHAR": exp.ToChar.from_arg_list, +227 "TO_TIMESTAMP": _snowflake_to_timestamp, +228 "ZEROIFNULL": _zeroifnull_to_if, +229 } +230 +231 FUNCTION_PARSERS = { +232 **parser.Parser.FUNCTION_PARSERS, +233 "DATE_PART": _parse_date_part, +234 } +235 FUNCTION_PARSERS.pop("TRIM") +236 +237 FUNC_TOKENS = { +238 *parser.Parser.FUNC_TOKENS, +239 TokenType.RLIKE, +240 TokenType.TABLE, +241 } +242 +243 COLUMN_OPERATORS = { +244 **parser.Parser.COLUMN_OPERATORS, +245 TokenType.COLON: lambda self, this, path: self.expression( +246 exp.Bracket, this=this, expressions=[path] +247 ), +248 } +249 +250 TIMESTAMPS = parser.Parser.TIMESTAMPS.copy() - {TokenType.TIME} +251 +252 RANGE_PARSERS = { +253 **parser.Parser.RANGE_PARSERS, +254 TokenType.LIKE_ANY: binary_range_parser(exp.LikeAny), +255 TokenType.ILIKE_ANY: binary_range_parser(exp.ILikeAny), +256 } 257 -258 RANGE_PARSERS = { -259 **parser.Parser.RANGE_PARSERS, -260 TokenType.LIKE_ANY: binary_range_parser(exp.LikeAny), -261 TokenType.ILIKE_ANY: binary_range_parser(exp.ILikeAny), +258 ALTER_PARSERS = { +259 **parser.Parser.ALTER_PARSERS, +260 "UNSET": lambda self: self._parse_alter_table_set_tag(unset=True), +261 "SET": lambda self: self._parse_alter_table_set_tag(), 262 } 263 -264 ALTER_PARSERS = { -265 **parser.Parser.ALTER_PARSERS, -266 "UNSET": lambda self: self._parse_alter_table_set_tag(unset=True), -267 "SET": lambda self: self._parse_alter_table_set_tag(), -268 } -269 -270 def _parse_alter_table_set_tag(self, unset: bool = False) -> exp.Expression: -271 self._match_text_seq("TAG") -272 parser = t.cast(t.Callable, self._parse_id_var if unset else 
self._parse_conjunction) -273 return self.expression(exp.SetTag, expressions=self._parse_csv(parser), unset=unset) +264 def _parse_alter_table_set_tag(self, unset: bool = False) -> exp.Expression: +265 self._match_text_seq("TAG") +266 parser = t.cast(t.Callable, self._parse_id_var if unset else self._parse_conjunction) +267 return self.expression(exp.SetTag, expressions=self._parse_csv(parser), unset=unset)
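The mappings in this hunk can be exercised directly. A minimal, hedged sketch (table, column and tag names are made up; the expected node shapes are inferred from the COLUMN_OPERATORS and ALTER_PARSERS entries above):

    import sqlglot
    from sqlglot import exp

    # TokenType.COLON maps to exp.Bracket, so Snowflake path access parses as a bracket node.
    ast = sqlglot.parse_one("SELECT payload:id FROM events", read="snowflake")
    print(ast.find(exp.Bracket))  # should be a Bracket wrapping the payload column

    # ALTER ... SET TAG / UNSET TAG is routed through _parse_alter_table_set_tag into exp.SetTag.
    tag = sqlglot.parse_one("ALTER TABLE t SET TAG cost_center = 'finance'", read="snowflake")
    print(tag.find(exp.SetTag))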
-Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces
-a parsed syntax tree.
+Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
- • error_level: the desired error level.
+ • error_level: The desired error level.
   Default: ErrorLevel.IMMEDIATE
- • error_message_context: determines the amount of context to capture from a
+ • error_message_context: Determines the amount of context to capture from a
   query string when displaying the error message (in number of characters).
-   Default: 50.
- • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.
-   Default: 0
- • alias_post_tablesample: If the table alias comes after tablesample.
-   Default: False
+   Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError.
   This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- • null_ordering: Indicates the default null ordering method to use if not explicitly set.
-   Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
-   Default: "nulls_are_small"
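The options listed above are keyword arguments that parse_one/transpile forward to the dialect's Parser; a small hedged sketch (the query is arbitrary):

    from sqlglot import parse_one
    from sqlglot.errors import ErrorLevel

    # Fail fast on malformed SQL and keep up to 100 characters of context in the message.
    expression = parse_one(
        "SELECT a FROM t",
        read="snowflake",
        error_level=ErrorLevel.RAISE,
        error_message_context=100,
        max_errors=3,
    )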
    @@ -904,36 +880,37 @@ Default: "nulls_are_small"
    -
    275    class Tokenizer(tokens.Tokenizer):
    -276        QUOTES = ["'", "$$"]
    -277        STRING_ESCAPES = ["\\", "'"]
    -278        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    -279
    -280        KEYWORDS = {
    -281            **tokens.Tokenizer.KEYWORDS,
    -282            "CHAR VARYING": TokenType.VARCHAR,
    -283            "CHARACTER VARYING": TokenType.VARCHAR,
    -284            "EXCLUDE": TokenType.EXCEPT,
    -285            "ILIKE ANY": TokenType.ILIKE_ANY,
    -286            "LIKE ANY": TokenType.LIKE_ANY,
    -287            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    -288            "MINUS": TokenType.EXCEPT,
    -289            "NCHAR VARYING": TokenType.VARCHAR,
    -290            "PUT": TokenType.COMMAND,
    -291            "RENAME": TokenType.REPLACE,
    -292            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
    -293            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
    -294            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
    -295            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
    -296            "SAMPLE": TokenType.TABLE_SAMPLE,
    +            
    269    class Tokenizer(tokens.Tokenizer):
    +270        QUOTES = ["'", "$$"]
    +271        STRING_ESCAPES = ["\\", "'"]
    +272        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    +273        COMMENTS = ["--", "//", ("/*", "*/")]
    +274
    +275        KEYWORDS = {
    +276            **tokens.Tokenizer.KEYWORDS,
    +277            "CHAR VARYING": TokenType.VARCHAR,
    +278            "CHARACTER VARYING": TokenType.VARCHAR,
    +279            "EXCLUDE": TokenType.EXCEPT,
    +280            "ILIKE ANY": TokenType.ILIKE_ANY,
    +281            "LIKE ANY": TokenType.LIKE_ANY,
    +282            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    +283            "MINUS": TokenType.EXCEPT,
    +284            "NCHAR VARYING": TokenType.VARCHAR,
    +285            "PUT": TokenType.COMMAND,
    +286            "RENAME": TokenType.REPLACE,
    +287            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
    +288            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
    +289            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
    +290            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
    +291            "SAMPLE": TokenType.TABLE_SAMPLE,
    +292        }
    +293
    +294        SINGLE_TOKENS = {
    +295            **tokens.Tokenizer.SINGLE_TOKENS,
    +296            "$": TokenType.PARAMETER,
     297        }
     298
    -299        SINGLE_TOKENS = {
    -300            **tokens.Tokenizer.SINGLE_TOKENS,
    -301            "$": TokenType.PARAMETER,
    -302        }
    -303
    -304        VAR_SINGLE_TOKENS = {"$"}
    +299        VAR_SINGLE_TOKENS = {"$"}
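The newly added COMMENTS entry is the user-visible change here: Snowflake's double-slash line comments now tokenize as comments rather than as two division operators. A hedged sketch (the SQL is illustrative):

    from sqlglot.dialects.snowflake import Snowflake

    sql = "SELECT $$dollar quoted$$ AS s  // trailing line comment"
    tokens = Snowflake.Tokenizer().tokenize(sql)
    # With COMMENTS = ["--", "//", ("/*", "*/")], everything after // is attached to the
    # preceding token as a comment instead of producing stray SLASH tokens.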
     
@@ -945,6 +922,7 @@ Default: "nulls_are_small"
@@ -961,150 +939,135 @@ Default: "nulls_are_small"
    -
    306    class Generator(generator.Generator):
    -307        PARAMETER_TOKEN = "$"
    -308        MATCHED_BY_SOURCE = False
    -309        SINGLE_STRING_INTERVAL = True
    -310        JOIN_HINTS = False
    -311        TABLE_HINTS = False
    -312
    -313        TRANSFORMS = {
    -314            **generator.Generator.TRANSFORMS,
    -315            exp.Array: inline_array_sql,
    -316            exp.ArrayConcat: rename_func("ARRAY_CAT"),
    -317            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
    -318            exp.AtTimeZone: lambda self, e: self.func(
    -319                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
    -320            ),
    -321            exp.DateAdd: lambda self, e: self.func("DATEADD", e.text("unit"), e.expression, e.this),
    -322            exp.DateDiff: lambda self, e: self.func(
    -323                "DATEDIFF", e.text("unit"), e.expression, e.this
    -324            ),
    -325            exp.DateStrToDate: datestrtodate_sql,
    -326            exp.DataType: _datatype_sql,
    -327            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    -328            exp.Extract: rename_func("DATE_PART"),
    -329            exp.If: rename_func("IFF"),
    -330            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
    -331            exp.LogicalOr: rename_func("BOOLOR_AGG"),
    -332            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
    -333            exp.Max: max_or_greatest,
    -334            exp.Min: min_or_least,
    -335            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    -336            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -337            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
    -338            exp.StrPosition: lambda self, e: self.func(
    -339                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
    +            
    301    class Generator(generator.Generator):
    +302        PARAMETER_TOKEN = "$"
    +303        MATCHED_BY_SOURCE = False
    +304        SINGLE_STRING_INTERVAL = True
    +305        JOIN_HINTS = False
    +306        TABLE_HINTS = False
    +307
    +308        TRANSFORMS = {
    +309            **generator.Generator.TRANSFORMS,
    +310            exp.Array: inline_array_sql,
    +311            exp.ArrayConcat: rename_func("ARRAY_CAT"),
    +312            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
    +313            exp.AtTimeZone: lambda self, e: self.func(
    +314                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
    +315            ),
    +316            exp.DateAdd: lambda self, e: self.func("DATEADD", e.text("unit"), e.expression, e.this),
    +317            exp.DateDiff: lambda self, e: self.func(
    +318                "DATEDIFF", e.text("unit"), e.expression, e.this
    +319            ),
    +320            exp.DateStrToDate: datestrtodate_sql,
    +321            exp.DataType: _datatype_sql,
    +322            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    +323            exp.Extract: rename_func("DATE_PART"),
    +324            exp.If: rename_func("IFF"),
    +325            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
    +326            exp.LogicalOr: rename_func("BOOLOR_AGG"),
    +327            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
    +328            exp.Max: max_or_greatest,
    +329            exp.Min: min_or_least,
    +330            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    +331            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +332            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
    +333            exp.StrPosition: lambda self, e: self.func(
    +334                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
    +335            ),
    +336            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    +337            exp.Struct: lambda self, e: self.func(
    +338                "OBJECT_CONSTRUCT",
    +339                *(arg for expression in e.expressions for arg in expression.flatten()),
     340            ),
    -341            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    -342            exp.Struct: lambda self, e: self.func(
    -343                "OBJECT_CONSTRUCT",
    -344                *(arg for expression in e.expressions for arg in expression.flatten()),
    +341            exp.TimeStrToTime: timestrtotime_sql,
    +342            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
    +343            exp.TimeToStr: lambda self, e: self.func(
    +344                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
     345            ),
    -346            exp.TimeStrToTime: timestrtotime_sql,
    -347            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
    -348            exp.TimeToStr: lambda self, e: self.func(
    -349                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
    -350            ),
    -351            exp.TimestampTrunc: timestamptrunc_sql,
    -352            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -353            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
    -354            exp.TsOrDsToDate: ts_or_ds_to_date_sql("snowflake"),
    -355            exp.UnixToTime: _unix_to_time_sql,
    -356            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
    +346            exp.TimestampTrunc: timestamptrunc_sql,
    +347            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +348            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
    +349            exp.TsOrDsToDate: ts_or_ds_to_date_sql("snowflake"),
    +350            exp.UnixToTime: _unix_to_time_sql,
    +351            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
    +352        }
    +353
    +354        TYPE_MAPPING = {
    +355            **generator.Generator.TYPE_MAPPING,
    +356            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
     357        }
     358
    -359        TYPE_MAPPING = {
    -360            **generator.Generator.TYPE_MAPPING,
    -361            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
    +359        STAR_MAPPING = {
    +360            "except": "EXCLUDE",
    +361            "replace": "RENAME",
     362        }
     363
    -364        STAR_MAPPING = {
    -365            "except": "EXCLUDE",
    -366            "replace": "RENAME",
    -367        }
    -368
    -369        PROPERTIES_LOCATION = {
    -370            **generator.Generator.PROPERTIES_LOCATION,
    -371            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
    -372            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -373        }
    +364        PROPERTIES_LOCATION = {
    +365            **generator.Generator.PROPERTIES_LOCATION,
    +366            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
    +367            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +368        }
    +369
    +370        def except_op(self, expression: exp.Except) -> str:
    +371            if not expression.args.get("distinct", False):
    +372                self.unsupported("EXCEPT with All is not supported in Snowflake")
    +373            return super().except_op(expression)
     374
    -375        def except_op(self, expression: exp.Except) -> str:
    +375        def intersect_op(self, expression: exp.Intersect) -> str:
     376            if not expression.args.get("distinct", False):
    -377                self.unsupported("EXCEPT with All is not supported in Snowflake")
    -378            return super().except_op(expression)
    +377                self.unsupported("INTERSECT with All is not supported in Snowflake")
    +378            return super().intersect_op(expression)
     379
    -380        def intersect_op(self, expression: exp.Intersect) -> str:
    -381            if not expression.args.get("distinct", False):
    -382                self.unsupported("INTERSECT with All is not supported in Snowflake")
    -383            return super().intersect_op(expression)
    -384
    -385        def settag_sql(self, expression: exp.SetTag) -> str:
    -386            action = "UNSET" if expression.args.get("unset") else "SET"
    -387            return f"{action} TAG {self.expressions(expression)}"
    -388
    -389        def describe_sql(self, expression: exp.Describe) -> str:
    -390            # Default to table if kind is unknown
    -391            kind_value = expression.args.get("kind") or "TABLE"
    -392            kind = f" {kind_value}" if kind_value else ""
    -393            this = f" {self.sql(expression, 'this')}"
    -394            return f"DESCRIBE{kind}{this}"
    -395
    -396        def generatedasidentitycolumnconstraint_sql(
    -397            self, expression: exp.GeneratedAsIdentityColumnConstraint
    -398        ) -> str:
    -399            start = expression.args.get("start")
    -400            start = f" START {start}" if start else ""
    -401            increment = expression.args.get("increment")
    -402            increment = f" INCREMENT {increment}" if increment else ""
    -403            return f"AUTOINCREMENT{start}{increment}"
    +380        def settag_sql(self, expression: exp.SetTag) -> str:
    +381            action = "UNSET" if expression.args.get("unset") else "SET"
    +382            return f"{action} TAG {self.expressions(expression)}"
    +383
    +384        def describe_sql(self, expression: exp.Describe) -> str:
    +385            # Default to table if kind is unknown
    +386            kind_value = expression.args.get("kind") or "TABLE"
    +387            kind = f" {kind_value}" if kind_value else ""
    +388            this = f" {self.sql(expression, 'this')}"
    +389            return f"DESCRIBE{kind}{this}"
    +390
    +391        def generatedasidentitycolumnconstraint_sql(
    +392            self, expression: exp.GeneratedAsIdentityColumnConstraint
    +393        ) -> str:
    +394            start = expression.args.get("start")
    +395            start = f" START {start}" if start else ""
    +396            increment = expression.args.get("increment")
    +397            increment = f" INCREMENT {increment}" if increment else ""
    +398            return f"AUTOINCREMENT{start}{increment}"
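These transforms can be checked with a simple round trip through the Snowflake dialect; exact whitespace aside, the output should mirror the TRANSFORMS entries above:

    import sqlglot

    # IFF parses to exp.If (see FUNCTIONS above) and is rendered back as IFF.
    print(sqlglot.transpile("SELECT IFF(x > 0, 'pos', 'neg')", read="snowflake", write="snowflake")[0])

    # DATEADD keeps Snowflake's (unit, increment, date) argument order on the way out.
    print(sqlglot.transpile("SELECT DATEADD(day, 5, d)", read="snowflake", write="snowflake")[0])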
     
-Generator interprets the given syntax tree and produces a SQL string as an output.
+Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
- • time_mapping (dict): the dictionary of custom time mappings in which the key
-   represents a python time format and the output the target time format
- • time_trie (trie): a trie of the time_mapping keys
- • pretty (bool): if set to True the returned string will be formatted. Default: False.
- • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
- • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
- • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
- • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
- • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
- • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
- • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
- • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
- • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
- • normalize (bool): if set to True all identifiers will lower cased
- • string_escape (str): specifies a string escape character. Default: '.
- • identifier_escape (str): specifies an identifier escape character. Default: ".
- • pad (int): determines padding in a formatted string. Default: 2.
- • indent (int): determines the size of indentation in a formatted string. Default: 4.
- • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
- • normalize_functions (str): normalize function names, "upper", "lower", or None
-   Default: "upper"
- • alias_post_tablesample (bool): if the table alias comes after tablesample
-   Default: False
- • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit
-   Default: False
- • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters
-   unsupported expressions. Default ErrorLevel.WARN.
- • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.
-   Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
-   Default: "nulls_are_small"
- • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
+ • pretty: Whether or not to format the produced SQL string.
+   Default: False.
+ • identify: Determines when an identifier should be quoted. Possible values are:
+   False (default): Never quote, except in cases where it's mandatory by the dialect.
+   True or 'always': Always quote.
+   'safe': Only quote identifiers that are case insensitive.
+ • normalize: Whether or not to normalize identifiers to lowercase.
+   Default: False.
+ • pad: Determines the pad size in a formatted string.
+   Default: 2.
+ • indent: Determines the indentation size in a formatted string.
+   Default: 2.
+ • normalize_functions: Whether or not to normalize all function names. Possible values are:
+   "upper" or True (default): Convert names to uppercase.
+   "lower": Convert names to lowercase.
+   False: Disables function name normalization.
+ • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.
+   Default ErrorLevel.WARN.
+ • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.
   This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- • leading_comma (bool): if the the comma is leading or trailing in select statements
+ • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.
+   This is only relevant when generating in pretty mode.
   Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.
   The default is on the smaller end because the length only represents a segment and not the true
@@ -1127,10 +1090,10 @@ Default: True
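These are keyword arguments accepted by transpile() and Expression.sql() and forwarded to the Generator; a hedged sketch with illustrative values:

    import sqlglot

    print(
        sqlglot.transpile(
            "select col_a, col_b from some_table",
            read="snowflake",
            write="snowflake",
            pretty=True,                   # format the output
            identify=True,                 # quote every identifier
            normalize_functions="lower",   # render function names in lowercase
        )[0]
    )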
    -
    375        def except_op(self, expression: exp.Except) -> str:
    -376            if not expression.args.get("distinct", False):
    -377                self.unsupported("EXCEPT with All is not supported in Snowflake")
    -378            return super().except_op(expression)
    +            
    370        def except_op(self, expression: exp.Except) -> str:
    +371            if not expression.args.get("distinct", False):
    +372                self.unsupported("EXCEPT with All is not supported in Snowflake")
    +373            return super().except_op(expression)
     
    @@ -1148,10 +1111,10 @@ Default: True
    -
    380        def intersect_op(self, expression: exp.Intersect) -> str:
    -381            if not expression.args.get("distinct", False):
    -382                self.unsupported("INTERSECT with All is not supported in Snowflake")
    -383            return super().intersect_op(expression)
    +            
    375        def intersect_op(self, expression: exp.Intersect) -> str:
    +376            if not expression.args.get("distinct", False):
    +377                self.unsupported("INTERSECT with All is not supported in Snowflake")
    +378            return super().intersect_op(expression)
     
    @@ -1169,9 +1132,9 @@ Default: True
    -
    385        def settag_sql(self, expression: exp.SetTag) -> str:
    -386            action = "UNSET" if expression.args.get("unset") else "SET"
    -387            return f"{action} TAG {self.expressions(expression)}"
    +            
    380        def settag_sql(self, expression: exp.SetTag) -> str:
    +381            action = "UNSET" if expression.args.get("unset") else "SET"
    +382            return f"{action} TAG {self.expressions(expression)}"
     
    @@ -1189,12 +1152,12 @@ Default: True
    -
    389        def describe_sql(self, expression: exp.Describe) -> str:
    -390            # Default to table if kind is unknown
    -391            kind_value = expression.args.get("kind") or "TABLE"
    -392            kind = f" {kind_value}" if kind_value else ""
    -393            this = f" {self.sql(expression, 'this')}"
    -394            return f"DESCRIBE{kind}{this}"
    +            
    384        def describe_sql(self, expression: exp.Describe) -> str:
    +385            # Default to table if kind is unknown
    +386            kind_value = expression.args.get("kind") or "TABLE"
    +387            kind = f" {kind_value}" if kind_value else ""
    +388            this = f" {self.sql(expression, 'this')}"
    +389            return f"DESCRIBE{kind}{this}"
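In practice the TABLE fallback means a bare DESCRIBE is rendered with an explicit kind; a hedged sketch (output inferred from the method above):

    import sqlglot

    # No kind is parsed for a plain table name, so describe_sql falls back to TABLE.
    print(sqlglot.transpile("DESCRIBE some_table", read="snowflake", write="snowflake")[0])
    # expected along the lines of: DESCRIBE TABLE some_table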
     
    @@ -1212,14 +1175,14 @@ Default: True
    -
    396        def generatedasidentitycolumnconstraint_sql(
    -397            self, expression: exp.GeneratedAsIdentityColumnConstraint
    -398        ) -> str:
    -399            start = expression.args.get("start")
    -400            start = f" START {start}" if start else ""
    -401            increment = expression.args.get("increment")
    -402            increment = f" INCREMENT {increment}" if increment else ""
    -403            return f"AUTOINCREMENT{start}{increment}"
    +            
    391        def generatedasidentitycolumnconstraint_sql(
    +392            self, expression: exp.GeneratedAsIdentityColumnConstraint
    +393        ) -> str:
    +394            start = expression.args.get("start")
    +395            start = f" START {start}" if start else ""
    +396            increment = expression.args.get("increment")
    +397            increment = f" INCREMENT {increment}" if increment else ""
    +398            return f"AUTOINCREMENT{start}{increment}"
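A hedged sketch of how this constraint surfaces when an identity column is transpiled into Snowflake (source dialect, table and column names are illustrative, and the identity options assume the reading dialect parses START WITH / INCREMENT BY):

    import sqlglot

    print(
        sqlglot.transpile(
            "CREATE TABLE t (id INT GENERATED BY DEFAULT AS IDENTITY (START WITH 1 INCREMENT BY 2))",
            read="postgres",
            write="snowflake",
        )[0]
    )
    # expected to contain something like: id INT AUTOINCREMENT START 1 INCREMENT 2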
     
    @@ -1254,6 +1217,7 @@ Default: True
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +
    createable_sql
    create_sql
    clone_sql
    prepend_ctes
    @@ -1333,10 +1297,12 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_having_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -1361,7 +1327,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
-concat_sql
+safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -1412,6 +1378,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -1460,6 +1427,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
diff --git a/docs/sqlglot/dialects/spark.html b/docs/sqlglot/dialects/spark.html
index 1051557..761a94b 100644
--- a/docs/sqlglot/dialects/spark.html
+++ b/docs/sqlglot/dialects/spark.html
@@ -210,27 +210,19 @@
-Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces
-a parsed syntax tree.
+Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
- • error_level: the desired error level.
+ • error_level: The desired error level.
   Default: ErrorLevel.IMMEDIATE
- • error_message_context: determines the amount of context to capture from a
+ • error_message_context: Determines the amount of context to capture from a
   query string when displaying the error message (in number of characters).
-   Default: 50.
- • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.
-   Default: 0
- • alias_post_tablesample: If the table alias comes after tablesample.
-   Default: False
+   Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError.
   This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- • null_ordering: Indicates the default null ordering method to use if not explicitly set.
-   Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
-   Default: "nulls_are_small"
    @@ -279,49 +271,34 @@ Default: "nulls_are_small"
-Generator interprets the given syntax tree and produces a SQL string as an output.
+Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
- • time_mapping (dict): the dictionary of custom time mappings in which the key
-   represents a python time format and the output the target time format
- • time_trie (trie): a trie of the time_mapping keys
- • pretty (bool): if set to True the returned string will be formatted. Default: False.
- • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
- • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
- • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
- • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
- • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
- • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
- • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
- • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
- • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
- • normalize (bool): if set to True all identifiers will lower cased
- • string_escape (str): specifies a string escape character. Default: '.
- • identifier_escape (str): specifies an identifier escape character. Default: ".
- • pad (int): determines padding in a formatted string. Default: 2.
- • indent (int): determines the size of indentation in a formatted string. Default: 4.
- • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
- • normalize_functions (str): normalize function names, "upper", "lower", or None
-   Default: "upper"
- • alias_post_tablesample (bool): if the table alias comes after tablesample
-   Default: False
- • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit
-   Default: False
- • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters
-   unsupported expressions. Default ErrorLevel.WARN.
- • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.
-   Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
-   Default: "nulls_are_small"
- • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
+ • pretty: Whether or not to format the produced SQL string.
+   Default: False.
+ • identify: Determines when an identifier should be quoted. Possible values are:
+   False (default): Never quote, except in cases where it's mandatory by the dialect.
+   True or 'always': Always quote.
+   'safe': Only quote identifiers that are case insensitive.
+ • normalize: Whether or not to normalize identifiers to lowercase.
+   Default: False.
+ • pad: Determines the pad size in a formatted string.
+   Default: 2.
+ • indent: Determines the indentation size in a formatted string.
+   Default: 2.
+ • normalize_functions: Whether or not to normalize all function names. Possible values are:
+   "upper" or True (default): Convert names to uppercase.
+   "lower": Convert names to lowercase.
+   False: Disables function name normalization.
+ • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.
+   Default ErrorLevel.WARN.
+ • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.
   This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- • leading_comma (bool): if the the comma is leading or trailing in select statements
+ • leading_comma: Determines whether or not the comma is leading or trailing in select expressions.
+   This is only relevant when generating in pretty mode.
   Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.
   The default is on the smaller end because the length only represents a segment and not the true
@@ -387,6 +364,7 @@ Default: True
    • notnullcolumnconstraint_sql
      primarykeycolumnconstraint_sql
      uniquecolumnconstraint_sql
      +
      createable_sql
      create_sql
      clone_sql
      describe_sql
      @@ -467,9 +445,11 @@ Default: True
      ordered_sql
      matchrecognize_sql
      query_modifiers
      +
      offset_limit_modifiers
      after_limit_modifiers
      select_sql
      schema_sql
      +
      schema_columns_sql
      star_sql
      parameter_sql
      sessionparameter_sql
      @@ -494,7 +474,7 @@ Default: True
      nextvaluefor_sql
      extract_sql
      trim_sql
-concat_sql
+safeconcat_sql
      check_sql
      foreignkey_sql
      primarykey_sql
      @@ -544,6 +524,7 @@ Default: True
      respectnulls_sql
      intdiv_sql
      dpipe_sql
      +
      safedpipe_sql
      div_sql
      overlaps_sql
      distance_sql
      @@ -592,6 +573,7 @@ Default: True
      dictproperty_sql
      dictrange_sql
      dictsubproperty_sql
      +
      oncluster_sql
    sqlglot.dialects.spark2.Spark2.Generator
    diff --git a/docs/sqlglot/dialects/spark2.html b/docs/sqlglot/dialects/spark2.html index a468035..5e87bc5 100644 --- a/docs/sqlglot/dialects/spark2.html +++ b/docs/sqlglot/dialects/spark2.html @@ -123,7 +123,7 @@ 38def _str_to_date(self: Hive.Generator, expression: exp.StrToDate) -> str: 39 this = self.sql(expression, "this") 40 time_format = self.format_time(expression) - 41 if time_format == Hive.date_format: + 41 if time_format == Hive.DATE_FORMAT: 42 return f"TO_DATE({this})" 43 return f"TO_DATE({this}, {time_format})" 44 @@ -218,13 +218,13 @@ 133 "WEEKOFYEAR": lambda args: exp.WeekOfYear( 134 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 135 ), -136 "DATE": lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build("date")), -137 "DATE_TRUNC": lambda args: exp.TimestampTrunc( -138 this=seq_get(args, 1), -139 unit=exp.var(seq_get(args, 0)), -140 ), -141 "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), -142 "BOOLEAN": _parse_as_cast("boolean"), +136 "DATE_TRUNC": lambda args: exp.TimestampTrunc( +137 this=seq_get(args, 1), +138 unit=exp.var(seq_get(args, 0)), +139 ), +140 "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), +141 "BOOLEAN": _parse_as_cast("boolean"), +142 "DATE": _parse_as_cast("date"), 143 "DOUBLE": _parse_as_cast("double"), 144 "FLOAT": _parse_as_cast("float"), 145 "INT": _parse_as_cast("int"), @@ -247,97 +247,95 @@ 162 def _parse_add_column(self) -> t.Optional[exp.Expression]: 163 return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema() 164 -165 def _parse_drop_column(self) -> t.Optional[exp.Expression]: +165 def _parse_drop_column(self) -> t.Optional[exp.Drop | exp.Command]: 166 return self._match_text_seq("DROP", "COLUMNS") and self.expression( -167 exp.Drop, -168 this=self._parse_schema(), -169 kind="COLUMNS", -170 ) -171 -172 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: -173 if len(aggregations) == 1: -174 return [""] -175 return pivot_column_names(aggregations, dialect="spark") -176 -177 class Generator(Hive.Generator): -178 TYPE_MAPPING = { -179 **Hive.Generator.TYPE_MAPPING, -180 exp.DataType.Type.TINYINT: "BYTE", -181 exp.DataType.Type.SMALLINT: "SHORT", -182 exp.DataType.Type.BIGINT: "LONG", -183 } -184 -185 PROPERTIES_LOCATION = { -186 **Hive.Generator.PROPERTIES_LOCATION, -187 exp.EngineProperty: exp.Properties.Location.UNSUPPORTED, -188 exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED, -189 exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED, -190 exp.CollateProperty: exp.Properties.Location.UNSUPPORTED, -191 } -192 -193 TRANSFORMS = { -194 **Hive.Generator.TRANSFORMS, -195 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), -196 exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)", -197 exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})", -198 exp.BitwiseLeftShift: rename_func("SHIFTLEFT"), -199 exp.BitwiseRightShift: rename_func("SHIFTRIGHT"), -200 exp.Create: _create_sql, -201 exp.DateFromParts: rename_func("MAKE_DATE"), -202 exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")), -203 exp.DayOfMonth: rename_func("DAYOFMONTH"), -204 exp.DayOfWeek: rename_func("DAYOFWEEK"), -205 exp.DayOfYear: rename_func("DAYOFYEAR"), -206 exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}", -207 exp.From: transforms.preprocess([_unalias_pivot]), -208 exp.Hint: lambda self, e: f" 
/*+ {self.expressions(e).strip()} */", -209 exp.LogicalAnd: rename_func("BOOL_AND"), -210 exp.LogicalOr: rename_func("BOOL_OR"), -211 exp.Map: _map_sql, -212 exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]), -213 exp.Reduce: rename_func("AGGREGATE"), -214 exp.StrToDate: _str_to_date, -215 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", -216 exp.TimestampTrunc: lambda self, e: self.func( -217 "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this -218 ), -219 exp.Trim: trim_sql, -220 exp.UnixToTime: _unix_to_time_sql, -221 exp.VariancePop: rename_func("VAR_POP"), -222 exp.WeekOfYear: rename_func("WEEKOFYEAR"), -223 exp.WithinGroup: transforms.preprocess( -224 [transforms.remove_within_group_for_percentiles] -225 ), -226 } -227 TRANSFORMS.pop(exp.ArrayJoin) -228 TRANSFORMS.pop(exp.ArraySort) -229 TRANSFORMS.pop(exp.ILike) -230 TRANSFORMS.pop(exp.Left) -231 TRANSFORMS.pop(exp.Right) -232 -233 WRAP_DERIVED_VALUES = False -234 CREATE_FUNCTION_RETURN_AS = False -235 -236 def cast_sql(self, expression: exp.Cast) -> str: -237 if isinstance(expression.this, exp.Cast) and expression.this.is_type("json"): -238 schema = f"'{self.sql(expression, 'to')}'" -239 return self.func("FROM_JSON", expression.this.this, schema) -240 if expression.is_type("json"): -241 return self.func("TO_JSON", expression.this) +167 exp.Drop, this=self._parse_schema(), kind="COLUMNS" +168 ) +169 +170 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: +171 if len(aggregations) == 1: +172 return [""] +173 return pivot_column_names(aggregations, dialect="spark") +174 +175 class Generator(Hive.Generator): +176 TYPE_MAPPING = { +177 **Hive.Generator.TYPE_MAPPING, +178 exp.DataType.Type.TINYINT: "BYTE", +179 exp.DataType.Type.SMALLINT: "SHORT", +180 exp.DataType.Type.BIGINT: "LONG", +181 } +182 +183 PROPERTIES_LOCATION = { +184 **Hive.Generator.PROPERTIES_LOCATION, +185 exp.EngineProperty: exp.Properties.Location.UNSUPPORTED, +186 exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED, +187 exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED, +188 exp.CollateProperty: exp.Properties.Location.UNSUPPORTED, +189 } +190 +191 TRANSFORMS = { +192 **Hive.Generator.TRANSFORMS, +193 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), +194 exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)", +195 exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})", +196 exp.BitwiseLeftShift: rename_func("SHIFTLEFT"), +197 exp.BitwiseRightShift: rename_func("SHIFTRIGHT"), +198 exp.Create: _create_sql, +199 exp.DateFromParts: rename_func("MAKE_DATE"), +200 exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")), +201 exp.DayOfMonth: rename_func("DAYOFMONTH"), +202 exp.DayOfWeek: rename_func("DAYOFWEEK"), +203 exp.DayOfYear: rename_func("DAYOFYEAR"), +204 exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}", +205 exp.From: transforms.preprocess([_unalias_pivot]), +206 exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */", +207 exp.LogicalAnd: rename_func("BOOL_AND"), +208 exp.LogicalOr: rename_func("BOOL_OR"), +209 exp.Map: _map_sql, +210 exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]), +211 exp.Reduce: rename_func("AGGREGATE"), +212 exp.StrToDate: _str_to_date, +213 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", +214 exp.TimestampTrunc: lambda 
self, e: self.func( +215 "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this +216 ), +217 exp.Trim: trim_sql, +218 exp.UnixToTime: _unix_to_time_sql, +219 exp.VariancePop: rename_func("VAR_POP"), +220 exp.WeekOfYear: rename_func("WEEKOFYEAR"), +221 exp.WithinGroup: transforms.preprocess( +222 [transforms.remove_within_group_for_percentiles] +223 ), +224 } +225 TRANSFORMS.pop(exp.ArrayJoin) +226 TRANSFORMS.pop(exp.ArraySort) +227 TRANSFORMS.pop(exp.ILike) +228 TRANSFORMS.pop(exp.Left) +229 TRANSFORMS.pop(exp.Right) +230 +231 WRAP_DERIVED_VALUES = False +232 CREATE_FUNCTION_RETURN_AS = False +233 +234 def cast_sql(self, expression: exp.Cast) -> str: +235 if isinstance(expression.this, exp.Cast) and expression.this.is_type("json"): +236 schema = f"'{self.sql(expression, 'to')}'" +237 return self.func("FROM_JSON", expression.this.this, schema) +238 if expression.is_type("json"): +239 return self.func("TO_JSON", expression.this) +240 +241 return super(Hive.Generator, self).cast_sql(expression) 242 -243 return super(Hive.Generator, self).cast_sql(expression) -244 -245 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: -246 return super().columndef_sql( -247 expression, -248 sep=": " -249 if isinstance(expression.parent, exp.DataType) -250 and expression.parent.is_type("struct") -251 else sep, -252 ) -253 -254 class Tokenizer(Hive.Tokenizer): -255 HEX_STRINGS = [("X'", "'")] +243 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: +244 return super().columndef_sql( +245 expression, +246 sep=": " +247 if isinstance(expression.parent, exp.DataType) +248 and expression.parent.is_type("struct") +249 else sep, +250 ) +251 +252 class Tokenizer(Hive.Tokenizer): +253 HEX_STRINGS = [("X'", "'")]
    @@ -382,13 +380,13 @@ 134 "WEEKOFYEAR": lambda args: exp.WeekOfYear( 135 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 136 ), -137 "DATE": lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build("date")), -138 "DATE_TRUNC": lambda args: exp.TimestampTrunc( -139 this=seq_get(args, 1), -140 unit=exp.var(seq_get(args, 0)), -141 ), -142 "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), -143 "BOOLEAN": _parse_as_cast("boolean"), +137 "DATE_TRUNC": lambda args: exp.TimestampTrunc( +138 this=seq_get(args, 1), +139 unit=exp.var(seq_get(args, 0)), +140 ), +141 "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), +142 "BOOLEAN": _parse_as_cast("boolean"), +143 "DATE": _parse_as_cast("date"), 144 "DOUBLE": _parse_as_cast("double"), 145 "FLOAT": _parse_as_cast("float"), 146 "INT": _parse_as_cast("int"), @@ -411,97 +409,95 @@ 163 def _parse_add_column(self) -> t.Optional[exp.Expression]: 164 return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema() 165 -166 def _parse_drop_column(self) -> t.Optional[exp.Expression]: +166 def _parse_drop_column(self) -> t.Optional[exp.Drop | exp.Command]: 167 return self._match_text_seq("DROP", "COLUMNS") and self.expression( -168 exp.Drop, -169 this=self._parse_schema(), -170 kind="COLUMNS", -171 ) -172 -173 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: -174 if len(aggregations) == 1: -175 return [""] -176 return pivot_column_names(aggregations, dialect="spark") -177 -178 class Generator(Hive.Generator): -179 TYPE_MAPPING = { -180 **Hive.Generator.TYPE_MAPPING, -181 exp.DataType.Type.TINYINT: "BYTE", -182 exp.DataType.Type.SMALLINT: "SHORT", -183 exp.DataType.Type.BIGINT: "LONG", -184 } -185 -186 PROPERTIES_LOCATION = { -187 **Hive.Generator.PROPERTIES_LOCATION, -188 exp.EngineProperty: exp.Properties.Location.UNSUPPORTED, -189 exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED, -190 exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED, -191 exp.CollateProperty: exp.Properties.Location.UNSUPPORTED, -192 } -193 -194 TRANSFORMS = { -195 **Hive.Generator.TRANSFORMS, -196 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), -197 exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)", -198 exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})", -199 exp.BitwiseLeftShift: rename_func("SHIFTLEFT"), -200 exp.BitwiseRightShift: rename_func("SHIFTRIGHT"), -201 exp.Create: _create_sql, -202 exp.DateFromParts: rename_func("MAKE_DATE"), -203 exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")), -204 exp.DayOfMonth: rename_func("DAYOFMONTH"), -205 exp.DayOfWeek: rename_func("DAYOFWEEK"), -206 exp.DayOfYear: rename_func("DAYOFYEAR"), -207 exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}", -208 exp.From: transforms.preprocess([_unalias_pivot]), -209 exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */", -210 exp.LogicalAnd: rename_func("BOOL_AND"), -211 exp.LogicalOr: rename_func("BOOL_OR"), -212 exp.Map: _map_sql, -213 exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]), -214 exp.Reduce: rename_func("AGGREGATE"), -215 exp.StrToDate: _str_to_date, -216 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", -217 exp.TimestampTrunc: lambda self, e: self.func( -218 "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this -219 ), -220 exp.Trim: 
trim_sql, -221 exp.UnixToTime: _unix_to_time_sql, -222 exp.VariancePop: rename_func("VAR_POP"), -223 exp.WeekOfYear: rename_func("WEEKOFYEAR"), -224 exp.WithinGroup: transforms.preprocess( -225 [transforms.remove_within_group_for_percentiles] -226 ), -227 } -228 TRANSFORMS.pop(exp.ArrayJoin) -229 TRANSFORMS.pop(exp.ArraySort) -230 TRANSFORMS.pop(exp.ILike) -231 TRANSFORMS.pop(exp.Left) -232 TRANSFORMS.pop(exp.Right) -233 -234 WRAP_DERIVED_VALUES = False -235 CREATE_FUNCTION_RETURN_AS = False -236 -237 def cast_sql(self, expression: exp.Cast) -> str: -238 if isinstance(expression.this, exp.Cast) and expression.this.is_type("json"): -239 schema = f"'{self.sql(expression, 'to')}'" -240 return self.func("FROM_JSON", expression.this.this, schema) -241 if expression.is_type("json"): -242 return self.func("TO_JSON", expression.this) +168 exp.Drop, this=self._parse_schema(), kind="COLUMNS" +169 ) +170 +171 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: +172 if len(aggregations) == 1: +173 return [""] +174 return pivot_column_names(aggregations, dialect="spark") +175 +176 class Generator(Hive.Generator): +177 TYPE_MAPPING = { +178 **Hive.Generator.TYPE_MAPPING, +179 exp.DataType.Type.TINYINT: "BYTE", +180 exp.DataType.Type.SMALLINT: "SHORT", +181 exp.DataType.Type.BIGINT: "LONG", +182 } +183 +184 PROPERTIES_LOCATION = { +185 **Hive.Generator.PROPERTIES_LOCATION, +186 exp.EngineProperty: exp.Properties.Location.UNSUPPORTED, +187 exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED, +188 exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED, +189 exp.CollateProperty: exp.Properties.Location.UNSUPPORTED, +190 } +191 +192 TRANSFORMS = { +193 **Hive.Generator.TRANSFORMS, +194 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), +195 exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)", +196 exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})", +197 exp.BitwiseLeftShift: rename_func("SHIFTLEFT"), +198 exp.BitwiseRightShift: rename_func("SHIFTRIGHT"), +199 exp.Create: _create_sql, +200 exp.DateFromParts: rename_func("MAKE_DATE"), +201 exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")), +202 exp.DayOfMonth: rename_func("DAYOFMONTH"), +203 exp.DayOfWeek: rename_func("DAYOFWEEK"), +204 exp.DayOfYear: rename_func("DAYOFYEAR"), +205 exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}", +206 exp.From: transforms.preprocess([_unalias_pivot]), +207 exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */", +208 exp.LogicalAnd: rename_func("BOOL_AND"), +209 exp.LogicalOr: rename_func("BOOL_OR"), +210 exp.Map: _map_sql, +211 exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]), +212 exp.Reduce: rename_func("AGGREGATE"), +213 exp.StrToDate: _str_to_date, +214 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", +215 exp.TimestampTrunc: lambda self, e: self.func( +216 "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this +217 ), +218 exp.Trim: trim_sql, +219 exp.UnixToTime: _unix_to_time_sql, +220 exp.VariancePop: rename_func("VAR_POP"), +221 exp.WeekOfYear: rename_func("WEEKOFYEAR"), +222 exp.WithinGroup: transforms.preprocess( +223 [transforms.remove_within_group_for_percentiles] +224 ), +225 } +226 TRANSFORMS.pop(exp.ArrayJoin) +227 TRANSFORMS.pop(exp.ArraySort) +228 TRANSFORMS.pop(exp.ILike) +229 TRANSFORMS.pop(exp.Left) +230 TRANSFORMS.pop(exp.Right) +231 +232 
WRAP_DERIVED_VALUES = False +233 CREATE_FUNCTION_RETURN_AS = False +234 +235 def cast_sql(self, expression: exp.Cast) -> str: +236 if isinstance(expression.this, exp.Cast) and expression.this.is_type("json"): +237 schema = f"'{self.sql(expression, 'to')}'" +238 return self.func("FROM_JSON", expression.this.this, schema) +239 if expression.is_type("json"): +240 return self.func("TO_JSON", expression.this) +241 +242 return super(Hive.Generator, self).cast_sql(expression) 243 -244 return super(Hive.Generator, self).cast_sql(expression) -245 -246 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: -247 return super().columndef_sql( -248 expression, -249 sep=": " -250 if isinstance(expression.parent, exp.DataType) -251 and expression.parent.is_type("struct") -252 else sep, -253 ) -254 -255 class Tokenizer(Hive.Tokenizer): -256 HEX_STRINGS = [("X'", "'")] +244 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: +245 return super().columndef_sql( +246 expression, +247 sep=": " +248 if isinstance(expression.parent, exp.DataType) +249 and expression.parent.is_type("struct") +250 else sep, +251 ) +252 +253 class Tokenizer(Hive.Tokenizer): +254 HEX_STRINGS = [("X'", "'")]
    @@ -564,13 +560,13 @@ 134 "WEEKOFYEAR": lambda args: exp.WeekOfYear( 135 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 136 ), -137 "DATE": lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build("date")), -138 "DATE_TRUNC": lambda args: exp.TimestampTrunc( -139 this=seq_get(args, 1), -140 unit=exp.var(seq_get(args, 0)), -141 ), -142 "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), -143 "BOOLEAN": _parse_as_cast("boolean"), +137 "DATE_TRUNC": lambda args: exp.TimestampTrunc( +138 this=seq_get(args, 1), +139 unit=exp.var(seq_get(args, 0)), +140 ), +141 "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), +142 "BOOLEAN": _parse_as_cast("boolean"), +143 "DATE": _parse_as_cast("date"), 144 "DOUBLE": _parse_as_cast("double"), 145 "FLOAT": _parse_as_cast("float"), 146 "INT": _parse_as_cast("int"), @@ -593,41 +589,31 @@ 163 def _parse_add_column(self) -> t.Optional[exp.Expression]: 164 return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema() 165 -166 def _parse_drop_column(self) -> t.Optional[exp.Expression]: +166 def _parse_drop_column(self) -> t.Optional[exp.Drop | exp.Command]: 167 return self._match_text_seq("DROP", "COLUMNS") and self.expression( -168 exp.Drop, -169 this=self._parse_schema(), -170 kind="COLUMNS", -171 ) -172 -173 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: -174 if len(aggregations) == 1: -175 return [""] -176 return pivot_column_names(aggregations, dialect="spark") +168 exp.Drop, this=self._parse_schema(), kind="COLUMNS" +169 ) +170 +171 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: +172 if len(aggregations) == 1: +173 return [""] +174 return pivot_column_names(aggregations, dialect="spark")
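The switch from a hand-rolled DATE lambda to _parse_as_cast("date"), together with the JSON handling in cast_sql shown in this diff, can be sanity-checked as follows (a hedged sketch; exact output strings may differ slightly):

    import sqlglot
    from sqlglot import exp

    # DATE(x) now goes through _parse_as_cast("date"), i.e. it parses as a plain CAST.
    ast = sqlglot.parse_one("SELECT DATE(x) FROM t", read="spark2")
    print(ast.find(exp.Cast))

    # Casting to JSON is routed through Spark2's cast_sql and rendered with TO_JSON.
    print(sqlglot.transpile("SELECT CAST(col AS JSON)", write="spark2")[0])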
-Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces
-a parsed syntax tree.
+Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
- • error_level: the desired error level.
+ • error_level: The desired error level.
   Default: ErrorLevel.IMMEDIATE
- • error_message_context: determines the amount of context to capture from a
+ • error_message_context: Determines the amount of context to capture from a
   query string when displaying the error message (in number of characters).
-   Default: 50.
- • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.
-   Default: 0
- • alias_post_tablesample: If the table alias comes after tablesample.
-   Default: False
+   Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError.
   This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- • null_ordering: Indicates the default null ordering method to use if not explicitly set.
-   Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
-   Default: "nulls_are_small"
    @@ -660,128 +646,113 @@ Default: "nulls_are_small"
    -
    178    class Generator(Hive.Generator):
    -179        TYPE_MAPPING = {
    -180            **Hive.Generator.TYPE_MAPPING,
    -181            exp.DataType.Type.TINYINT: "BYTE",
    -182            exp.DataType.Type.SMALLINT: "SHORT",
    -183            exp.DataType.Type.BIGINT: "LONG",
    -184        }
    -185
    -186        PROPERTIES_LOCATION = {
    -187            **Hive.Generator.PROPERTIES_LOCATION,
    -188            exp.EngineProperty: exp.Properties.Location.UNSUPPORTED,
    -189            exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED,
    -190            exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED,
    -191            exp.CollateProperty: exp.Properties.Location.UNSUPPORTED,
    -192        }
    -193
    -194        TRANSFORMS = {
    -195            **Hive.Generator.TRANSFORMS,
    -196            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
    -197            exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
    -198            exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})",
    -199            exp.BitwiseLeftShift: rename_func("SHIFTLEFT"),
    -200            exp.BitwiseRightShift: rename_func("SHIFTRIGHT"),
    -201            exp.Create: _create_sql,
    -202            exp.DateFromParts: rename_func("MAKE_DATE"),
    -203            exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")),
    -204            exp.DayOfMonth: rename_func("DAYOFMONTH"),
    -205            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    -206            exp.DayOfYear: rename_func("DAYOFYEAR"),
    -207            exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}",
    -208            exp.From: transforms.preprocess([_unalias_pivot]),
    -209            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
    -210            exp.LogicalAnd: rename_func("BOOL_AND"),
    -211            exp.LogicalOr: rename_func("BOOL_OR"),
    -212            exp.Map: _map_sql,
    -213            exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
    -214            exp.Reduce: rename_func("AGGREGATE"),
    -215            exp.StrToDate: _str_to_date,
    -216            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    -217            exp.TimestampTrunc: lambda self, e: self.func(
    -218                "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this
    -219            ),
    -220            exp.Trim: trim_sql,
    -221            exp.UnixToTime: _unix_to_time_sql,
    -222            exp.VariancePop: rename_func("VAR_POP"),
    -223            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
    -224            exp.WithinGroup: transforms.preprocess(
    -225                [transforms.remove_within_group_for_percentiles]
    -226            ),
    -227        }
    -228        TRANSFORMS.pop(exp.ArrayJoin)
    -229        TRANSFORMS.pop(exp.ArraySort)
    -230        TRANSFORMS.pop(exp.ILike)
    -231        TRANSFORMS.pop(exp.Left)
    -232        TRANSFORMS.pop(exp.Right)
    -233
    -234        WRAP_DERIVED_VALUES = False
    -235        CREATE_FUNCTION_RETURN_AS = False
    -236
    -237        def cast_sql(self, expression: exp.Cast) -> str:
    -238            if isinstance(expression.this, exp.Cast) and expression.this.is_type("json"):
    -239                schema = f"'{self.sql(expression, 'to')}'"
    -240                return self.func("FROM_JSON", expression.this.this, schema)
    -241            if expression.is_type("json"):
    -242                return self.func("TO_JSON", expression.this)
    +            
    176    class Generator(Hive.Generator):
    +177        TYPE_MAPPING = {
    +178            **Hive.Generator.TYPE_MAPPING,
    +179            exp.DataType.Type.TINYINT: "BYTE",
    +180            exp.DataType.Type.SMALLINT: "SHORT",
    +181            exp.DataType.Type.BIGINT: "LONG",
    +182        }
    +183
    +184        PROPERTIES_LOCATION = {
    +185            **Hive.Generator.PROPERTIES_LOCATION,
    +186            exp.EngineProperty: exp.Properties.Location.UNSUPPORTED,
    +187            exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED,
    +188            exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED,
    +189            exp.CollateProperty: exp.Properties.Location.UNSUPPORTED,
    +190        }
    +191
    +192        TRANSFORMS = {
    +193            **Hive.Generator.TRANSFORMS,
    +194            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
    +195            exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
    +196            exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})",
    +197            exp.BitwiseLeftShift: rename_func("SHIFTLEFT"),
    +198            exp.BitwiseRightShift: rename_func("SHIFTRIGHT"),
    +199            exp.Create: _create_sql,
    +200            exp.DateFromParts: rename_func("MAKE_DATE"),
    +201            exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")),
    +202            exp.DayOfMonth: rename_func("DAYOFMONTH"),
    +203            exp.DayOfWeek: rename_func("DAYOFWEEK"),
    +204            exp.DayOfYear: rename_func("DAYOFYEAR"),
    +205            exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}",
    +206            exp.From: transforms.preprocess([_unalias_pivot]),
    +207            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
    +208            exp.LogicalAnd: rename_func("BOOL_AND"),
    +209            exp.LogicalOr: rename_func("BOOL_OR"),
    +210            exp.Map: _map_sql,
    +211            exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]),
    +212            exp.Reduce: rename_func("AGGREGATE"),
    +213            exp.StrToDate: _str_to_date,
    +214            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    +215            exp.TimestampTrunc: lambda self, e: self.func(
    +216                "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this
    +217            ),
    +218            exp.Trim: trim_sql,
    +219            exp.UnixToTime: _unix_to_time_sql,
    +220            exp.VariancePop: rename_func("VAR_POP"),
    +221            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
    +222            exp.WithinGroup: transforms.preprocess(
    +223                [transforms.remove_within_group_for_percentiles]
    +224            ),
    +225        }
    +226        TRANSFORMS.pop(exp.ArrayJoin)
    +227        TRANSFORMS.pop(exp.ArraySort)
    +228        TRANSFORMS.pop(exp.ILike)
    +229        TRANSFORMS.pop(exp.Left)
    +230        TRANSFORMS.pop(exp.Right)
    +231
    +232        WRAP_DERIVED_VALUES = False
    +233        CREATE_FUNCTION_RETURN_AS = False
    +234
    +235        def cast_sql(self, expression: exp.Cast) -> str:
    +236            if isinstance(expression.this, exp.Cast) and expression.this.is_type("json"):
    +237                schema = f"'{self.sql(expression, 'to')}'"
    +238                return self.func("FROM_JSON", expression.this.this, schema)
    +239            if expression.is_type("json"):
    +240                return self.func("TO_JSON", expression.this)
    +241
    +242            return super(Hive.Generator, self).cast_sql(expression)
     243
    -244            return super(Hive.Generator, self).cast_sql(expression)
    -245
    -246        def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
    -247            return super().columndef_sql(
    -248                expression,
    -249                sep=": "
    -250                if isinstance(expression.parent, exp.DataType)
    -251                and expression.parent.is_type("struct")
    -252                else sep,
    -253            )
    +244        def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
    +245            return super().columndef_sql(
    +246                expression,
    +247                sep=": "
    +248                if isinstance(expression.parent, exp.DataType)
    +249                and expression.parent.is_type("struct")
    +250                else sep,
    +251            )
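
Most of the TRANSFORMS entries in the listing above are plain function renames. As a rough illustration (not taken from the patch; the query and column names are made up), transpiling a Presto query through the spark2 dialect should rename APPROX_DISTINCT accordingly:

    import sqlglot

    # exp.ApproxDistinct is mapped to APPROX_COUNT_DISTINCT for Spark (see TRANSFORMS above).
    sql = "SELECT APPROX_DISTINCT(user_id) FROM events"
    print(sqlglot.transpile(sql, read="presto", write="spark2")[0])
    # Expected output (approximately): SELECT APPROX_COUNT_DISTINCT(user_id) FROM events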
     
-Generator interprets the given syntax tree and produces a SQL string as an output.
+Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
- • time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
- • time_trie (trie): a trie of the time_mapping keys
- • pretty (bool): if set to True the returned string will be formatted. Default: False.
- • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
- • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
- • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
- • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
- • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
- • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
- • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
- • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
- • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
- • normalize (bool): if set to True all identifiers will lower cased
- • string_escape (str): specifies a string escape character. Default: '.
- • identifier_escape (str): specifies an identifier escape character. Default: ".
- • pad (int): determines padding in a formatted string. Default: 2.
- • indent (int): determines the size of indentation in a formatted string. Default: 4.
- • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
- • normalize_functions (str): normalize function names, "upper", "lower", or None Default: "upper"
- • alias_post_tablesample (bool): if the table alias comes after tablesample Default: False
- • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit Default: False
- • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
+ • pretty: Whether or not to format the produced SQL string. Default: False.
+ • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
+ • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
+ • pad: Determines the pad size in a formatted string. Default: 2.
+ • indent: Determines the indentation size in a formatted string. Default: 2.
+ • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
+ • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
+ • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- • leading_comma (bool): if the the comma is leading or trailing in select statements Default: False
+ • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
@@ -804,14 +775,14 @@ Default: True
    -
    237        def cast_sql(self, expression: exp.Cast) -> str:
    -238            if isinstance(expression.this, exp.Cast) and expression.this.is_type("json"):
    -239                schema = f"'{self.sql(expression, 'to')}'"
    -240                return self.func("FROM_JSON", expression.this.this, schema)
    -241            if expression.is_type("json"):
    -242                return self.func("TO_JSON", expression.this)
    -243
    -244            return super(Hive.Generator, self).cast_sql(expression)
    +            
    235        def cast_sql(self, expression: exp.Cast) -> str:
    +236            if isinstance(expression.this, exp.Cast) and expression.this.is_type("json"):
    +237                schema = f"'{self.sql(expression, 'to')}'"
    +238                return self.func("FROM_JSON", expression.this.this, schema)
    +239            if expression.is_type("json"):
    +240                return self.func("TO_JSON", expression.this)
    +241
    +242            return super(Hive.Generator, self).cast_sql(expression)
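
A hedged sketch of what the cast_sql override above does: casts to and from the JSON type are rewritten to Spark's TO_JSON/FROM_JSON functions. The queries are invented and the outputs are approximate:

    import sqlglot

    # CAST(x AS JSON) should come out as TO_JSON(x) for Spark.
    print(sqlglot.transpile("SELECT CAST(payload AS JSON)", read="presto", write="spark2")[0])
    # Expected (approximately): SELECT TO_JSON(payload)

    # Casting a JSON cast back to a concrete type should become FROM_JSON with a schema string.
    print(sqlglot.transpile("SELECT CAST(CAST(payload AS JSON) AS ARRAY(INTEGER))", read="presto", write="spark2")[0])
    # Expected (approximately): SELECT FROM_JSON(payload, 'ARRAY<INT>')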
     
    @@ -829,14 +800,14 @@ Default: True
    -
    246        def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
    -247            return super().columndef_sql(
    -248                expression,
    -249                sep=": "
    -250                if isinstance(expression.parent, exp.DataType)
    -251                and expression.parent.is_type("struct")
    -252                else sep,
    -253            )
    +            
    244        def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
    +245            return super().columndef_sql(
    +246                expression,
    +247                sep=": "
    +248                if isinstance(expression.parent, exp.DataType)
    +249                and expression.parent.is_type("struct")
    +250                else sep,
    +251            )
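
The columndef_sql override switches to a ": " separator only for fields nested inside a STRUCT type. A minimal sketch, with an invented table definition and approximate output:

    import sqlglot

    ddl = "CREATE TABLE t (col STRUCT<a INT, b STRING>)"
    print(sqlglot.transpile(ddl, read="hive", write="spark2")[0])
    # Expected (approximately): CREATE TABLE t (col STRUCT<a: INT, b: STRING>)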
     
    @@ -871,6 +842,7 @@ Default: True
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +
    createable_sql
    create_sql
    clone_sql
    describe_sql
    @@ -951,9 +923,11 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -978,7 +952,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    +
    safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -1028,6 +1002,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -1076,6 +1051,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
    sqlglot.dialects.hive.Hive.Generator
    @@ -1099,8 +1075,8 @@ Default: True
    -
    255    class Tokenizer(Hive.Tokenizer):
    -256        HEX_STRINGS = [("X'", "'")]
    +            
    253    class Tokenizer(Hive.Tokenizer):
    +254        HEX_STRINGS = [("X'", "'")]
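
For context on the rewritten Generator arguments documented above, a small usage sketch: the options are passed through sqlglot.transpile as keyword arguments. The query and names are illustrative only:

    import sqlglot

    sql = "select id, count(*) as n from orders group by id"
    # pretty formats the output; identify forces identifier quoting. Both are Generator options.
    print(sqlglot.transpile(sql, write="spark2", pretty=True, identify=True)[0])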
     
@@ -1112,6 +1088,7 @@ Default: True
diff --git a/docs/sqlglot/dialects/sqlite.html b/docs/sqlglot/dialects/sqlite.html
index 0ae00eb..353cc6d 100644
--- a/docs/sqlglot/dialects/sqlite.html
+++ b/docs/sqlglot/dialects/sqlite.html
@@ -99,194 +99,193 @@
    5 Dialect, 6 arrow_json_extract_scalar_sql, 7 arrow_json_extract_sql, - 8 count_if_to_sum, - 9 no_ilike_sql, - 10 no_pivot_sql, - 11 no_tablesample_sql, - 12 no_trycast_sql, - 13 rename_func, - 14) - 15from sqlglot.tokens import TokenType - 16 + 8 concat_to_dpipe_sql, + 9 count_if_to_sum, + 10 no_ilike_sql, + 11 no_pivot_sql, + 12 no_tablesample_sql, + 13 no_trycast_sql, + 14 rename_func, + 15) + 16from sqlglot.tokens import TokenType 17 - 18def _date_add_sql(self: generator.Generator, expression: exp.DateAdd) -> str: - 19 modifier = expression.expression - 20 modifier = modifier.name if modifier.is_string else self.sql(modifier) - 21 unit = expression.args.get("unit") - 22 modifier = f"'{modifier} {unit.name}'" if unit else f"'{modifier}'" - 23 return self.func("DATE", expression.this, modifier) - 24 + 18 + 19def _date_add_sql(self: generator.Generator, expression: exp.DateAdd) -> str: + 20 modifier = expression.expression + 21 modifier = modifier.name if modifier.is_string else self.sql(modifier) + 22 unit = expression.args.get("unit") + 23 modifier = f"'{modifier} {unit.name}'" if unit else f"'{modifier}'" + 24 return self.func("DATE", expression.this, modifier) 25 - 26def _transform_create(expression: exp.Expression) -> exp.Expression: - 27 """Move primary key to a column and enforce auto_increment on primary keys.""" - 28 schema = expression.this - 29 - 30 if isinstance(expression, exp.Create) and isinstance(schema, exp.Schema): - 31 defs = {} - 32 primary_key = None - 33 - 34 for e in schema.expressions: - 35 if isinstance(e, exp.ColumnDef): - 36 defs[e.name] = e - 37 elif isinstance(e, exp.PrimaryKey): - 38 primary_key = e - 39 - 40 if primary_key and len(primary_key.expressions) == 1: - 41 column = defs[primary_key.expressions[0].name] - 42 column.append( - 43 "constraints", exp.ColumnConstraint(kind=exp.PrimaryKeyColumnConstraint()) - 44 ) - 45 schema.expressions.remove(primary_key) - 46 else: - 47 for column in defs.values(): - 48 auto_increment = None - 49 for constraint in column.constraints.copy(): - 50 if isinstance(constraint.kind, exp.PrimaryKeyColumnConstraint): - 51 break - 52 if isinstance(constraint.kind, exp.AutoIncrementColumnConstraint): - 53 auto_increment = constraint - 54 if auto_increment: - 55 column.constraints.remove(auto_increment) - 56 - 57 return expression - 58 + 26 + 27def _transform_create(expression: exp.Expression) -> exp.Expression: + 28 """Move primary key to a column and enforce auto_increment on primary keys.""" + 29 schema = expression.this + 30 + 31 if isinstance(expression, exp.Create) and isinstance(schema, exp.Schema): + 32 defs = {} + 33 primary_key = None + 34 + 35 for e in schema.expressions: + 36 if isinstance(e, exp.ColumnDef): + 37 defs[e.name] = e + 38 elif isinstance(e, exp.PrimaryKey): + 39 primary_key = e + 40 + 41 if primary_key and len(primary_key.expressions) == 1: + 42 column = defs[primary_key.expressions[0].name] + 43 column.append( + 44 "constraints", exp.ColumnConstraint(kind=exp.PrimaryKeyColumnConstraint()) + 45 ) + 46 schema.expressions.remove(primary_key) + 47 else: + 48 for column in defs.values(): + 49 auto_increment = None + 50 for constraint in column.constraints.copy(): + 51 if isinstance(constraint.kind, exp.PrimaryKeyColumnConstraint): + 52 break + 53 if isinstance(constraint.kind, exp.AutoIncrementColumnConstraint): + 54 auto_increment = constraint + 55 if auto_increment: + 56 column.constraints.remove(auto_increment) + 57 + 58 return expression 59 - 60class SQLite(Dialect): - 61 class 
Tokenizer(tokens.Tokenizer): - 62 IDENTIFIERS = ['"', ("[", "]"), "`"] - 63 HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", ""), ("0X", "")] - 64 - 65 KEYWORDS = { - 66 **tokens.Tokenizer.KEYWORDS, - 67 } - 68 - 69 class Parser(parser.Parser): - 70 FUNCTIONS = { - 71 **parser.Parser.FUNCTIONS, - 72 "EDITDIST3": exp.Levenshtein.from_arg_list, - 73 } - 74 - 75 class Generator(generator.Generator): - 76 JOIN_HINTS = False - 77 TABLE_HINTS = False - 78 - 79 TYPE_MAPPING = { - 80 **generator.Generator.TYPE_MAPPING, - 81 exp.DataType.Type.BOOLEAN: "INTEGER", - 82 exp.DataType.Type.TINYINT: "INTEGER", - 83 exp.DataType.Type.SMALLINT: "INTEGER", - 84 exp.DataType.Type.INT: "INTEGER", - 85 exp.DataType.Type.BIGINT: "INTEGER", - 86 exp.DataType.Type.FLOAT: "REAL", - 87 exp.DataType.Type.DOUBLE: "REAL", - 88 exp.DataType.Type.DECIMAL: "REAL", - 89 exp.DataType.Type.CHAR: "TEXT", - 90 exp.DataType.Type.NCHAR: "TEXT", - 91 exp.DataType.Type.VARCHAR: "TEXT", - 92 exp.DataType.Type.NVARCHAR: "TEXT", - 93 exp.DataType.Type.BINARY: "BLOB", - 94 exp.DataType.Type.VARBINARY: "BLOB", - 95 } - 96 - 97 TOKEN_MAPPING = { - 98 TokenType.AUTO_INCREMENT: "AUTOINCREMENT", - 99 } -100 -101 TRANSFORMS = { -102 **generator.Generator.TRANSFORMS, -103 exp.CountIf: count_if_to_sum, -104 exp.Create: transforms.preprocess([_transform_create]), -105 exp.CurrentDate: lambda *_: "CURRENT_DATE", -106 exp.CurrentTime: lambda *_: "CURRENT_TIME", -107 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", -108 exp.DateAdd: _date_add_sql, -109 exp.DateStrToDate: lambda self, e: self.sql(e, "this"), -110 exp.ILike: no_ilike_sql, -111 exp.JSONExtract: arrow_json_extract_sql, -112 exp.JSONExtractScalar: arrow_json_extract_scalar_sql, -113 exp.JSONBExtract: arrow_json_extract_sql, -114 exp.JSONBExtractScalar: arrow_json_extract_scalar_sql, -115 exp.Levenshtein: rename_func("EDITDIST3"), -116 exp.LogicalOr: rename_func("MAX"), -117 exp.LogicalAnd: rename_func("MIN"), -118 exp.Pivot: no_pivot_sql, -119 exp.Select: transforms.preprocess( -120 [transforms.eliminate_distinct_on, transforms.eliminate_qualify] -121 ), -122 exp.TableSample: no_tablesample_sql, -123 exp.TimeStrToTime: lambda self, e: self.sql(e, "this"), -124 exp.TryCast: no_trycast_sql, -125 } -126 -127 PROPERTIES_LOCATION = { -128 k: exp.Properties.Location.UNSUPPORTED -129 for k, v in generator.Generator.PROPERTIES_LOCATION.items() -130 } -131 -132 LIMIT_FETCH = "LIMIT" -133 -134 def cast_sql(self, expression: exp.Cast) -> str: -135 if expression.is_type("date"): -136 return self.func("DATE", expression.this) -137 -138 return super().cast_sql(expression) -139 -140 def datediff_sql(self, expression: exp.DateDiff) -> str: -141 unit = expression.args.get("unit") -142 unit = unit.name.upper() if unit else "DAY" -143 -144 sql = f"(JULIANDAY({self.sql(expression, 'this')}) - JULIANDAY({self.sql(expression, 'expression')}))" -145 -146 if unit == "MONTH": -147 sql = f"{sql} / 30.0" -148 elif unit == "YEAR": -149 sql = f"{sql} / 365.0" -150 elif unit == "HOUR": -151 sql = f"{sql} * 24.0" -152 elif unit == "MINUTE": -153 sql = f"{sql} * 1440.0" -154 elif unit == "SECOND": -155 sql = f"{sql} * 86400.0" -156 elif unit == "MILLISECOND": -157 sql = f"{sql} * 86400000.0" -158 elif unit == "MICROSECOND": -159 sql = f"{sql} * 86400000000.0" -160 elif unit == "NANOSECOND": -161 sql = f"{sql} * 8640000000000.0" -162 else: -163 self.unsupported("DATEDIFF unsupported for '{unit}'.") -164 -165 return f"CAST({sql} AS INTEGER)" -166 -167 # https://www.sqlite.org/lang_aggfunc.html#group_concat -168 
def groupconcat_sql(self, expression: exp.GroupConcat) -> str: -169 this = expression.this -170 distinct = expression.find(exp.Distinct) -171 -172 if distinct: -173 this = distinct.expressions[0] -174 distinct_sql = "DISTINCT " -175 else: -176 distinct_sql = "" -177 -178 if isinstance(expression.this, exp.Order): -179 self.unsupported("SQLite GROUP_CONCAT doesn't support ORDER BY.") -180 if expression.this.this and not distinct: -181 this = expression.this.this -182 -183 separator = expression.args.get("separator") -184 return f"GROUP_CONCAT({distinct_sql}{self.format_args(this, separator)})" -185 -186 def least_sql(self, expression: exp.Least) -> str: -187 if len(expression.expressions) > 1: -188 return rename_func("MIN")(self, expression) -189 -190 return self.expressions(expression) -191 -192 def transaction_sql(self, expression: exp.Transaction) -> str: -193 this = expression.this -194 this = f" {this}" if this else "" -195 return f"BEGIN{this} TRANSACTION" + 60 + 61class SQLite(Dialect): + 62 class Tokenizer(tokens.Tokenizer): + 63 IDENTIFIERS = ['"', ("[", "]"), "`"] + 64 HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", ""), ("0X", "")] + 65 + 66 class Parser(parser.Parser): + 67 FUNCTIONS = { + 68 **parser.Parser.FUNCTIONS, + 69 "EDITDIST3": exp.Levenshtein.from_arg_list, + 70 } + 71 + 72 class Generator(generator.Generator): + 73 JOIN_HINTS = False + 74 TABLE_HINTS = False + 75 + 76 TYPE_MAPPING = { + 77 **generator.Generator.TYPE_MAPPING, + 78 exp.DataType.Type.BOOLEAN: "INTEGER", + 79 exp.DataType.Type.TINYINT: "INTEGER", + 80 exp.DataType.Type.SMALLINT: "INTEGER", + 81 exp.DataType.Type.INT: "INTEGER", + 82 exp.DataType.Type.BIGINT: "INTEGER", + 83 exp.DataType.Type.FLOAT: "REAL", + 84 exp.DataType.Type.DOUBLE: "REAL", + 85 exp.DataType.Type.DECIMAL: "REAL", + 86 exp.DataType.Type.CHAR: "TEXT", + 87 exp.DataType.Type.NCHAR: "TEXT", + 88 exp.DataType.Type.VARCHAR: "TEXT", + 89 exp.DataType.Type.NVARCHAR: "TEXT", + 90 exp.DataType.Type.BINARY: "BLOB", + 91 exp.DataType.Type.VARBINARY: "BLOB", + 92 } + 93 + 94 TOKEN_MAPPING = { + 95 TokenType.AUTO_INCREMENT: "AUTOINCREMENT", + 96 } + 97 + 98 TRANSFORMS = { + 99 **generator.Generator.TRANSFORMS, +100 exp.Concat: concat_to_dpipe_sql, +101 exp.CountIf: count_if_to_sum, +102 exp.Create: transforms.preprocess([_transform_create]), +103 exp.CurrentDate: lambda *_: "CURRENT_DATE", +104 exp.CurrentTime: lambda *_: "CURRENT_TIME", +105 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", +106 exp.DateAdd: _date_add_sql, +107 exp.DateStrToDate: lambda self, e: self.sql(e, "this"), +108 exp.ILike: no_ilike_sql, +109 exp.JSONExtract: arrow_json_extract_sql, +110 exp.JSONExtractScalar: arrow_json_extract_scalar_sql, +111 exp.JSONBExtract: arrow_json_extract_sql, +112 exp.JSONBExtractScalar: arrow_json_extract_scalar_sql, +113 exp.Levenshtein: rename_func("EDITDIST3"), +114 exp.LogicalOr: rename_func("MAX"), +115 exp.LogicalAnd: rename_func("MIN"), +116 exp.Pivot: no_pivot_sql, +117 exp.SafeConcat: concat_to_dpipe_sql, +118 exp.Select: transforms.preprocess( +119 [transforms.eliminate_distinct_on, transforms.eliminate_qualify] +120 ), +121 exp.TableSample: no_tablesample_sql, +122 exp.TimeStrToTime: lambda self, e: self.sql(e, "this"), +123 exp.TryCast: no_trycast_sql, +124 } +125 +126 PROPERTIES_LOCATION = { +127 k: exp.Properties.Location.UNSUPPORTED +128 for k, v in generator.Generator.PROPERTIES_LOCATION.items() +129 } +130 +131 LIMIT_FETCH = "LIMIT" +132 +133 def cast_sql(self, expression: exp.Cast) -> str: +134 if 
expression.is_type("date"): +135 return self.func("DATE", expression.this) +136 +137 return super().cast_sql(expression) +138 +139 def datediff_sql(self, expression: exp.DateDiff) -> str: +140 unit = expression.args.get("unit") +141 unit = unit.name.upper() if unit else "DAY" +142 +143 sql = f"(JULIANDAY({self.sql(expression, 'this')}) - JULIANDAY({self.sql(expression, 'expression')}))" +144 +145 if unit == "MONTH": +146 sql = f"{sql} / 30.0" +147 elif unit == "YEAR": +148 sql = f"{sql} / 365.0" +149 elif unit == "HOUR": +150 sql = f"{sql} * 24.0" +151 elif unit == "MINUTE": +152 sql = f"{sql} * 1440.0" +153 elif unit == "SECOND": +154 sql = f"{sql} * 86400.0" +155 elif unit == "MILLISECOND": +156 sql = f"{sql} * 86400000.0" +157 elif unit == "MICROSECOND": +158 sql = f"{sql} * 86400000000.0" +159 elif unit == "NANOSECOND": +160 sql = f"{sql} * 8640000000000.0" +161 else: +162 self.unsupported("DATEDIFF unsupported for '{unit}'.") +163 +164 return f"CAST({sql} AS INTEGER)" +165 +166 # https://www.sqlite.org/lang_aggfunc.html#group_concat +167 def groupconcat_sql(self, expression: exp.GroupConcat) -> str: +168 this = expression.this +169 distinct = expression.find(exp.Distinct) +170 +171 if distinct: +172 this = distinct.expressions[0] +173 distinct_sql = "DISTINCT " +174 else: +175 distinct_sql = "" +176 +177 if isinstance(expression.this, exp.Order): +178 self.unsupported("SQLite GROUP_CONCAT doesn't support ORDER BY.") +179 if expression.this.this and not distinct: +180 this = expression.this.this +181 +182 separator = expression.args.get("separator") +183 return f"GROUP_CONCAT({distinct_sql}{self.format_args(this, separator)})" +184 +185 def least_sql(self, expression: exp.Least) -> str: +186 if len(expression.expressions) > 1: +187 return rename_func("MIN")(self, expression) +188 +189 return self.expressions(expression) +190 +191 def transaction_sql(self, expression: exp.Transaction) -> str: +192 this = expression.this +193 this = f" {this}" if this else "" +194 return f"BEGIN{this} TRANSACTION"
    @@ -302,142 +301,140 @@
    -
     61class SQLite(Dialect):
    - 62    class Tokenizer(tokens.Tokenizer):
    - 63        IDENTIFIERS = ['"', ("[", "]"), "`"]
    - 64        HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", ""), ("0X", "")]
    - 65
    - 66        KEYWORDS = {
    - 67            **tokens.Tokenizer.KEYWORDS,
    - 68        }
    - 69
    - 70    class Parser(parser.Parser):
    - 71        FUNCTIONS = {
    - 72            **parser.Parser.FUNCTIONS,
    - 73            "EDITDIST3": exp.Levenshtein.from_arg_list,
    - 74        }
    - 75
    - 76    class Generator(generator.Generator):
    - 77        JOIN_HINTS = False
    - 78        TABLE_HINTS = False
    - 79
    - 80        TYPE_MAPPING = {
    - 81            **generator.Generator.TYPE_MAPPING,
    - 82            exp.DataType.Type.BOOLEAN: "INTEGER",
    - 83            exp.DataType.Type.TINYINT: "INTEGER",
    - 84            exp.DataType.Type.SMALLINT: "INTEGER",
    - 85            exp.DataType.Type.INT: "INTEGER",
    - 86            exp.DataType.Type.BIGINT: "INTEGER",
    - 87            exp.DataType.Type.FLOAT: "REAL",
    - 88            exp.DataType.Type.DOUBLE: "REAL",
    - 89            exp.DataType.Type.DECIMAL: "REAL",
    - 90            exp.DataType.Type.CHAR: "TEXT",
    - 91            exp.DataType.Type.NCHAR: "TEXT",
    - 92            exp.DataType.Type.VARCHAR: "TEXT",
    - 93            exp.DataType.Type.NVARCHAR: "TEXT",
    - 94            exp.DataType.Type.BINARY: "BLOB",
    - 95            exp.DataType.Type.VARBINARY: "BLOB",
    - 96        }
    - 97
    - 98        TOKEN_MAPPING = {
    - 99            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
    -100        }
    -101
    -102        TRANSFORMS = {
    -103            **generator.Generator.TRANSFORMS,
    -104            exp.CountIf: count_if_to_sum,
    -105            exp.Create: transforms.preprocess([_transform_create]),
    -106            exp.CurrentDate: lambda *_: "CURRENT_DATE",
    -107            exp.CurrentTime: lambda *_: "CURRENT_TIME",
    -108            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    -109            exp.DateAdd: _date_add_sql,
    -110            exp.DateStrToDate: lambda self, e: self.sql(e, "this"),
    -111            exp.ILike: no_ilike_sql,
    -112            exp.JSONExtract: arrow_json_extract_sql,
    -113            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    -114            exp.JSONBExtract: arrow_json_extract_sql,
    -115            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
    -116            exp.Levenshtein: rename_func("EDITDIST3"),
    -117            exp.LogicalOr: rename_func("MAX"),
    -118            exp.LogicalAnd: rename_func("MIN"),
    -119            exp.Pivot: no_pivot_sql,
    -120            exp.Select: transforms.preprocess(
    -121                [transforms.eliminate_distinct_on, transforms.eliminate_qualify]
    -122            ),
    -123            exp.TableSample: no_tablesample_sql,
    -124            exp.TimeStrToTime: lambda self, e: self.sql(e, "this"),
    -125            exp.TryCast: no_trycast_sql,
    -126        }
    -127
    -128        PROPERTIES_LOCATION = {
    -129            k: exp.Properties.Location.UNSUPPORTED
    -130            for k, v in generator.Generator.PROPERTIES_LOCATION.items()
    -131        }
    -132
    -133        LIMIT_FETCH = "LIMIT"
    -134
    -135        def cast_sql(self, expression: exp.Cast) -> str:
    -136            if expression.is_type("date"):
    -137                return self.func("DATE", expression.this)
    -138
    -139            return super().cast_sql(expression)
    -140
    -141        def datediff_sql(self, expression: exp.DateDiff) -> str:
    -142            unit = expression.args.get("unit")
    -143            unit = unit.name.upper() if unit else "DAY"
    -144
    -145            sql = f"(JULIANDAY({self.sql(expression, 'this')}) - JULIANDAY({self.sql(expression, 'expression')}))"
    -146
    -147            if unit == "MONTH":
    -148                sql = f"{sql} / 30.0"
    -149            elif unit == "YEAR":
    -150                sql = f"{sql} / 365.0"
    -151            elif unit == "HOUR":
    -152                sql = f"{sql} * 24.0"
    -153            elif unit == "MINUTE":
    -154                sql = f"{sql} * 1440.0"
    -155            elif unit == "SECOND":
    -156                sql = f"{sql} * 86400.0"
    -157            elif unit == "MILLISECOND":
    -158                sql = f"{sql} * 86400000.0"
    -159            elif unit == "MICROSECOND":
    -160                sql = f"{sql} * 86400000000.0"
    -161            elif unit == "NANOSECOND":
    -162                sql = f"{sql} * 8640000000000.0"
    -163            else:
    -164                self.unsupported("DATEDIFF unsupported for '{unit}'.")
    -165
    -166            return f"CAST({sql} AS INTEGER)"
    -167
    -168        # https://www.sqlite.org/lang_aggfunc.html#group_concat
    -169        def groupconcat_sql(self, expression: exp.GroupConcat) -> str:
    -170            this = expression.this
    -171            distinct = expression.find(exp.Distinct)
    -172
    -173            if distinct:
    -174                this = distinct.expressions[0]
    -175                distinct_sql = "DISTINCT "
    -176            else:
    -177                distinct_sql = ""
    -178
    -179            if isinstance(expression.this, exp.Order):
    -180                self.unsupported("SQLite GROUP_CONCAT doesn't support ORDER BY.")
    -181                if expression.this.this and not distinct:
    -182                    this = expression.this.this
    -183
    -184            separator = expression.args.get("separator")
    -185            return f"GROUP_CONCAT({distinct_sql}{self.format_args(this, separator)})"
    -186
    -187        def least_sql(self, expression: exp.Least) -> str:
    -188            if len(expression.expressions) > 1:
    -189                return rename_func("MIN")(self, expression)
    -190
    -191            return self.expressions(expression)
    -192
    -193        def transaction_sql(self, expression: exp.Transaction) -> str:
    -194            this = expression.this
    -195            this = f" {this}" if this else ""
    -196            return f"BEGIN{this} TRANSACTION"
    +            
     62class SQLite(Dialect):
    + 63    class Tokenizer(tokens.Tokenizer):
    + 64        IDENTIFIERS = ['"', ("[", "]"), "`"]
    + 65        HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", ""), ("0X", "")]
    + 66
    + 67    class Parser(parser.Parser):
    + 68        FUNCTIONS = {
    + 69            **parser.Parser.FUNCTIONS,
    + 70            "EDITDIST3": exp.Levenshtein.from_arg_list,
    + 71        }
    + 72
    + 73    class Generator(generator.Generator):
    + 74        JOIN_HINTS = False
    + 75        TABLE_HINTS = False
    + 76
    + 77        TYPE_MAPPING = {
    + 78            **generator.Generator.TYPE_MAPPING,
    + 79            exp.DataType.Type.BOOLEAN: "INTEGER",
    + 80            exp.DataType.Type.TINYINT: "INTEGER",
    + 81            exp.DataType.Type.SMALLINT: "INTEGER",
    + 82            exp.DataType.Type.INT: "INTEGER",
    + 83            exp.DataType.Type.BIGINT: "INTEGER",
    + 84            exp.DataType.Type.FLOAT: "REAL",
    + 85            exp.DataType.Type.DOUBLE: "REAL",
    + 86            exp.DataType.Type.DECIMAL: "REAL",
    + 87            exp.DataType.Type.CHAR: "TEXT",
    + 88            exp.DataType.Type.NCHAR: "TEXT",
    + 89            exp.DataType.Type.VARCHAR: "TEXT",
    + 90            exp.DataType.Type.NVARCHAR: "TEXT",
    + 91            exp.DataType.Type.BINARY: "BLOB",
    + 92            exp.DataType.Type.VARBINARY: "BLOB",
    + 93        }
    + 94
    + 95        TOKEN_MAPPING = {
    + 96            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
    + 97        }
    + 98
    + 99        TRANSFORMS = {
    +100            **generator.Generator.TRANSFORMS,
    +101            exp.Concat: concat_to_dpipe_sql,
    +102            exp.CountIf: count_if_to_sum,
    +103            exp.Create: transforms.preprocess([_transform_create]),
    +104            exp.CurrentDate: lambda *_: "CURRENT_DATE",
    +105            exp.CurrentTime: lambda *_: "CURRENT_TIME",
    +106            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    +107            exp.DateAdd: _date_add_sql,
    +108            exp.DateStrToDate: lambda self, e: self.sql(e, "this"),
    +109            exp.ILike: no_ilike_sql,
    +110            exp.JSONExtract: arrow_json_extract_sql,
    +111            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    +112            exp.JSONBExtract: arrow_json_extract_sql,
    +113            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
    +114            exp.Levenshtein: rename_func("EDITDIST3"),
    +115            exp.LogicalOr: rename_func("MAX"),
    +116            exp.LogicalAnd: rename_func("MIN"),
    +117            exp.Pivot: no_pivot_sql,
    +118            exp.SafeConcat: concat_to_dpipe_sql,
    +119            exp.Select: transforms.preprocess(
    +120                [transforms.eliminate_distinct_on, transforms.eliminate_qualify]
    +121            ),
    +122            exp.TableSample: no_tablesample_sql,
    +123            exp.TimeStrToTime: lambda self, e: self.sql(e, "this"),
    +124            exp.TryCast: no_trycast_sql,
    +125        }
    +126
    +127        PROPERTIES_LOCATION = {
    +128            k: exp.Properties.Location.UNSUPPORTED
    +129            for k, v in generator.Generator.PROPERTIES_LOCATION.items()
    +130        }
    +131
    +132        LIMIT_FETCH = "LIMIT"
    +133
    +134        def cast_sql(self, expression: exp.Cast) -> str:
    +135            if expression.is_type("date"):
    +136                return self.func("DATE", expression.this)
    +137
    +138            return super().cast_sql(expression)
    +139
    +140        def datediff_sql(self, expression: exp.DateDiff) -> str:
    +141            unit = expression.args.get("unit")
    +142            unit = unit.name.upper() if unit else "DAY"
    +143
    +144            sql = f"(JULIANDAY({self.sql(expression, 'this')}) - JULIANDAY({self.sql(expression, 'expression')}))"
    +145
    +146            if unit == "MONTH":
    +147                sql = f"{sql} / 30.0"
    +148            elif unit == "YEAR":
    +149                sql = f"{sql} / 365.0"
    +150            elif unit == "HOUR":
    +151                sql = f"{sql} * 24.0"
    +152            elif unit == "MINUTE":
    +153                sql = f"{sql} * 1440.0"
    +154            elif unit == "SECOND":
    +155                sql = f"{sql} * 86400.0"
    +156            elif unit == "MILLISECOND":
    +157                sql = f"{sql} * 86400000.0"
    +158            elif unit == "MICROSECOND":
    +159                sql = f"{sql} * 86400000000.0"
    +160            elif unit == "NANOSECOND":
    +161                sql = f"{sql} * 8640000000000.0"
    +162            else:
    +163                self.unsupported("DATEDIFF unsupported for '{unit}'.")
    +164
    +165            return f"CAST({sql} AS INTEGER)"
    +166
    +167        # https://www.sqlite.org/lang_aggfunc.html#group_concat
    +168        def groupconcat_sql(self, expression: exp.GroupConcat) -> str:
    +169            this = expression.this
    +170            distinct = expression.find(exp.Distinct)
    +171
    +172            if distinct:
    +173                this = distinct.expressions[0]
    +174                distinct_sql = "DISTINCT "
    +175            else:
    +176                distinct_sql = ""
    +177
    +178            if isinstance(expression.this, exp.Order):
    +179                self.unsupported("SQLite GROUP_CONCAT doesn't support ORDER BY.")
    +180                if expression.this.this and not distinct:
    +181                    this = expression.this.this
    +182
    +183            separator = expression.args.get("separator")
    +184            return f"GROUP_CONCAT({distinct_sql}{self.format_args(this, separator)})"
    +185
    +186        def least_sql(self, expression: exp.Least) -> str:
    +187            if len(expression.expressions) > 1:
    +188                return rename_func("MIN")(self, expression)
    +189
    +190            return self.expressions(expression)
    +191
    +192        def transaction_sql(self, expression: exp.Transaction) -> str:
    +193            this = expression.this
    +194            this = f" {this}" if this else ""
    +195            return f"BEGIN{this} TRANSACTION"
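
The newly added exp.Concat and exp.SafeConcat entries route through concat_to_dpipe_sql, so CONCAT calls should come out as || chains in SQLite. A hedged example with invented names and approximate output:

    import sqlglot

    sql = "SELECT CONCAT(first_name, ' ', last_name) FROM people"
    print(sqlglot.transpile(sql, read="mysql", write="sqlite")[0])
    # Expected (approximately): SELECT first_name || ' ' || last_name FROM people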
     
    @@ -472,13 +469,9 @@
    -
    62    class Tokenizer(tokens.Tokenizer):
    -63        IDENTIFIERS = ['"', ("[", "]"), "`"]
    -64        HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", ""), ("0X", "")]
    -65
    -66        KEYWORDS = {
    -67            **tokens.Tokenizer.KEYWORDS,
    -68        }
    +            
    63    class Tokenizer(tokens.Tokenizer):
    +64        IDENTIFIERS = ['"', ("[", "]"), "`"]
    +65        HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", ""), ("0X", "")]
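
The Tokenizer above accepts double quotes, square brackets and backticks as identifier delimiters. A small sketch; the identifiers are invented and the quoting in the output is approximate:

    from sqlglot import parse_one

    expr = parse_one("SELECT [total amount] FROM `orders`", read="sqlite")
    # When generating SQLite again, identifiers are re-quoted with double quotes only where required.
    print(expr.sql(dialect="sqlite"))
    # Expected (approximately): SELECT "total amount" FROM orders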
     
@@ -490,6 +483,7 @@
@@ -506,35 +500,27 @@
    -
    70    class Parser(parser.Parser):
    -71        FUNCTIONS = {
    -72            **parser.Parser.FUNCTIONS,
    -73            "EDITDIST3": exp.Levenshtein.from_arg_list,
    -74        }
    +            
    67    class Parser(parser.Parser):
    +68        FUNCTIONS = {
    +69            **parser.Parser.FUNCTIONS,
    +70            "EDITDIST3": exp.Levenshtein.from_arg_list,
    +71        }
     
-Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.
+Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:

- • error_level: the desired error level. Default: ErrorLevel.IMMEDIATE
+ • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- • error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
+ • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
- • alias_post_tablesample: If the table alias comes after tablesample. Default: False
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- • null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
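
A quick sketch of how the Parser options above are typically passed in; the malformed query is intentional and purely illustrative:

    import sqlglot
    from sqlglot.errors import ErrorLevel, ParseError

    try:
        # With error_level=ErrorLevel.RAISE, parse errors are collected and raised together.
        sqlglot.parse_one("SELECT * FROM", read="sqlite", error_level=ErrorLevel.RAISE)
    except ParseError as err:
        # The message carries up to error_message_context characters of surrounding query text.
        print(err)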
    @@ -567,173 +553,160 @@ Default: "nulls_are_small"
    -
     76    class Generator(generator.Generator):
    - 77        JOIN_HINTS = False
    - 78        TABLE_HINTS = False
    - 79
    - 80        TYPE_MAPPING = {
    - 81            **generator.Generator.TYPE_MAPPING,
    - 82            exp.DataType.Type.BOOLEAN: "INTEGER",
    - 83            exp.DataType.Type.TINYINT: "INTEGER",
    - 84            exp.DataType.Type.SMALLINT: "INTEGER",
    - 85            exp.DataType.Type.INT: "INTEGER",
    - 86            exp.DataType.Type.BIGINT: "INTEGER",
    - 87            exp.DataType.Type.FLOAT: "REAL",
    - 88            exp.DataType.Type.DOUBLE: "REAL",
    - 89            exp.DataType.Type.DECIMAL: "REAL",
    - 90            exp.DataType.Type.CHAR: "TEXT",
    - 91            exp.DataType.Type.NCHAR: "TEXT",
    - 92            exp.DataType.Type.VARCHAR: "TEXT",
    - 93            exp.DataType.Type.NVARCHAR: "TEXT",
    - 94            exp.DataType.Type.BINARY: "BLOB",
    - 95            exp.DataType.Type.VARBINARY: "BLOB",
    - 96        }
    - 97
    - 98        TOKEN_MAPPING = {
    - 99            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
    -100        }
    -101
    -102        TRANSFORMS = {
    -103            **generator.Generator.TRANSFORMS,
    -104            exp.CountIf: count_if_to_sum,
    -105            exp.Create: transforms.preprocess([_transform_create]),
    -106            exp.CurrentDate: lambda *_: "CURRENT_DATE",
    -107            exp.CurrentTime: lambda *_: "CURRENT_TIME",
    -108            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    -109            exp.DateAdd: _date_add_sql,
    -110            exp.DateStrToDate: lambda self, e: self.sql(e, "this"),
    -111            exp.ILike: no_ilike_sql,
    -112            exp.JSONExtract: arrow_json_extract_sql,
    -113            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    -114            exp.JSONBExtract: arrow_json_extract_sql,
    -115            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
    -116            exp.Levenshtein: rename_func("EDITDIST3"),
    -117            exp.LogicalOr: rename_func("MAX"),
    -118            exp.LogicalAnd: rename_func("MIN"),
    -119            exp.Pivot: no_pivot_sql,
    -120            exp.Select: transforms.preprocess(
    -121                [transforms.eliminate_distinct_on, transforms.eliminate_qualify]
    -122            ),
    -123            exp.TableSample: no_tablesample_sql,
    -124            exp.TimeStrToTime: lambda self, e: self.sql(e, "this"),
    -125            exp.TryCast: no_trycast_sql,
    -126        }
    -127
    -128        PROPERTIES_LOCATION = {
    -129            k: exp.Properties.Location.UNSUPPORTED
    -130            for k, v in generator.Generator.PROPERTIES_LOCATION.items()
    -131        }
    -132
    -133        LIMIT_FETCH = "LIMIT"
    -134
    -135        def cast_sql(self, expression: exp.Cast) -> str:
    -136            if expression.is_type("date"):
    -137                return self.func("DATE", expression.this)
    -138
    -139            return super().cast_sql(expression)
    -140
    -141        def datediff_sql(self, expression: exp.DateDiff) -> str:
    -142            unit = expression.args.get("unit")
    -143            unit = unit.name.upper() if unit else "DAY"
    -144
    -145            sql = f"(JULIANDAY({self.sql(expression, 'this')}) - JULIANDAY({self.sql(expression, 'expression')}))"
    -146
    -147            if unit == "MONTH":
    -148                sql = f"{sql} / 30.0"
    -149            elif unit == "YEAR":
    -150                sql = f"{sql} / 365.0"
    -151            elif unit == "HOUR":
    -152                sql = f"{sql} * 24.0"
    -153            elif unit == "MINUTE":
    -154                sql = f"{sql} * 1440.0"
    -155            elif unit == "SECOND":
    -156                sql = f"{sql} * 86400.0"
    -157            elif unit == "MILLISECOND":
    -158                sql = f"{sql} * 86400000.0"
    -159            elif unit == "MICROSECOND":
    -160                sql = f"{sql} * 86400000000.0"
    -161            elif unit == "NANOSECOND":
    -162                sql = f"{sql} * 8640000000000.0"
    -163            else:
    -164                self.unsupported("DATEDIFF unsupported for '{unit}'.")
    -165
    -166            return f"CAST({sql} AS INTEGER)"
    -167
    -168        # https://www.sqlite.org/lang_aggfunc.html#group_concat
    -169        def groupconcat_sql(self, expression: exp.GroupConcat) -> str:
    -170            this = expression.this
    -171            distinct = expression.find(exp.Distinct)
    -172
    -173            if distinct:
    -174                this = distinct.expressions[0]
    -175                distinct_sql = "DISTINCT "
    -176            else:
    -177                distinct_sql = ""
    -178
    -179            if isinstance(expression.this, exp.Order):
    -180                self.unsupported("SQLite GROUP_CONCAT doesn't support ORDER BY.")
    -181                if expression.this.this and not distinct:
    -182                    this = expression.this.this
    -183
    -184            separator = expression.args.get("separator")
    -185            return f"GROUP_CONCAT({distinct_sql}{self.format_args(this, separator)})"
    -186
    -187        def least_sql(self, expression: exp.Least) -> str:
    -188            if len(expression.expressions) > 1:
    -189                return rename_func("MIN")(self, expression)
    -190
    -191            return self.expressions(expression)
    -192
    -193        def transaction_sql(self, expression: exp.Transaction) -> str:
    -194            this = expression.this
    -195            this = f" {this}" if this else ""
    -196            return f"BEGIN{this} TRANSACTION"
    +            
     73    class Generator(generator.Generator):
    + 74        JOIN_HINTS = False
    + 75        TABLE_HINTS = False
    + 76
    + 77        TYPE_MAPPING = {
    + 78            **generator.Generator.TYPE_MAPPING,
    + 79            exp.DataType.Type.BOOLEAN: "INTEGER",
    + 80            exp.DataType.Type.TINYINT: "INTEGER",
    + 81            exp.DataType.Type.SMALLINT: "INTEGER",
    + 82            exp.DataType.Type.INT: "INTEGER",
    + 83            exp.DataType.Type.BIGINT: "INTEGER",
    + 84            exp.DataType.Type.FLOAT: "REAL",
    + 85            exp.DataType.Type.DOUBLE: "REAL",
    + 86            exp.DataType.Type.DECIMAL: "REAL",
    + 87            exp.DataType.Type.CHAR: "TEXT",
    + 88            exp.DataType.Type.NCHAR: "TEXT",
    + 89            exp.DataType.Type.VARCHAR: "TEXT",
    + 90            exp.DataType.Type.NVARCHAR: "TEXT",
    + 91            exp.DataType.Type.BINARY: "BLOB",
    + 92            exp.DataType.Type.VARBINARY: "BLOB",
    + 93        }
    + 94
    + 95        TOKEN_MAPPING = {
    + 96            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
    + 97        }
    + 98
    + 99        TRANSFORMS = {
    +100            **generator.Generator.TRANSFORMS,
    +101            exp.Concat: concat_to_dpipe_sql,
    +102            exp.CountIf: count_if_to_sum,
    +103            exp.Create: transforms.preprocess([_transform_create]),
    +104            exp.CurrentDate: lambda *_: "CURRENT_DATE",
    +105            exp.CurrentTime: lambda *_: "CURRENT_TIME",
    +106            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    +107            exp.DateAdd: _date_add_sql,
    +108            exp.DateStrToDate: lambda self, e: self.sql(e, "this"),
    +109            exp.ILike: no_ilike_sql,
    +110            exp.JSONExtract: arrow_json_extract_sql,
    +111            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
    +112            exp.JSONBExtract: arrow_json_extract_sql,
    +113            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
    +114            exp.Levenshtein: rename_func("EDITDIST3"),
    +115            exp.LogicalOr: rename_func("MAX"),
    +116            exp.LogicalAnd: rename_func("MIN"),
    +117            exp.Pivot: no_pivot_sql,
    +118            exp.SafeConcat: concat_to_dpipe_sql,
    +119            exp.Select: transforms.preprocess(
    +120                [transforms.eliminate_distinct_on, transforms.eliminate_qualify]
    +121            ),
    +122            exp.TableSample: no_tablesample_sql,
    +123            exp.TimeStrToTime: lambda self, e: self.sql(e, "this"),
    +124            exp.TryCast: no_trycast_sql,
    +125        }
    +126
    +127        PROPERTIES_LOCATION = {
    +128            k: exp.Properties.Location.UNSUPPORTED
    +129            for k, v in generator.Generator.PROPERTIES_LOCATION.items()
    +130        }
    +131
    +132        LIMIT_FETCH = "LIMIT"
    +133
    +134        def cast_sql(self, expression: exp.Cast) -> str:
    +135            if expression.is_type("date"):
    +136                return self.func("DATE", expression.this)
    +137
    +138            return super().cast_sql(expression)
    +139
    +140        def datediff_sql(self, expression: exp.DateDiff) -> str:
    +141            unit = expression.args.get("unit")
    +142            unit = unit.name.upper() if unit else "DAY"
    +143
    +144            sql = f"(JULIANDAY({self.sql(expression, 'this')}) - JULIANDAY({self.sql(expression, 'expression')}))"
    +145
    +146            if unit == "MONTH":
    +147                sql = f"{sql} / 30.0"
    +148            elif unit == "YEAR":
    +149                sql = f"{sql} / 365.0"
    +150            elif unit == "HOUR":
    +151                sql = f"{sql} * 24.0"
    +152            elif unit == "MINUTE":
    +153                sql = f"{sql} * 1440.0"
    +154            elif unit == "SECOND":
    +155                sql = f"{sql} * 86400.0"
    +156            elif unit == "MILLISECOND":
    +157                sql = f"{sql} * 86400000.0"
    +158            elif unit == "MICROSECOND":
    +159                sql = f"{sql} * 86400000000.0"
    +160            elif unit == "NANOSECOND":
    +161                sql = f"{sql} * 8640000000000.0"
    +162            else:
    +163                self.unsupported("DATEDIFF unsupported for '{unit}'.")
    +164
    +165            return f"CAST({sql} AS INTEGER)"
    +166
    +167        # https://www.sqlite.org/lang_aggfunc.html#group_concat
    +168        def groupconcat_sql(self, expression: exp.GroupConcat) -> str:
    +169            this = expression.this
    +170            distinct = expression.find(exp.Distinct)
    +171
    +172            if distinct:
    +173                this = distinct.expressions[0]
    +174                distinct_sql = "DISTINCT "
    +175            else:
    +176                distinct_sql = ""
    +177
    +178            if isinstance(expression.this, exp.Order):
    +179                self.unsupported("SQLite GROUP_CONCAT doesn't support ORDER BY.")
    +180                if expression.this.this and not distinct:
    +181                    this = expression.this.this
    +182
    +183            separator = expression.args.get("separator")
    +184            return f"GROUP_CONCAT({distinct_sql}{self.format_args(this, separator)})"
    +185
    +186        def least_sql(self, expression: exp.Least) -> str:
    +187            if len(expression.expressions) > 1:
    +188                return rename_func("MIN")(self, expression)
    +189
    +190            return self.expressions(expression)
    +191
    +192        def transaction_sql(self, expression: exp.Transaction) -> str:
    +193            this = expression.this
    +194            this = f" {this}" if this else ""
    +195            return f"BEGIN{this} TRANSACTION"
     
-Generator interprets the given syntax tree and produces a SQL string as an output.
+Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
- • time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
- • time_trie (trie): a trie of the time_mapping keys
- • pretty (bool): if set to True the returned string will be formatted. Default: False.
- • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
- • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
- • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
- • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
- • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
- • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
- • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
- • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
- • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
- • normalize (bool): if set to True all identifiers will lower cased
- • string_escape (str): specifies a string escape character. Default: '.
- • identifier_escape (str): specifies an identifier escape character. Default: ".
- • pad (int): determines padding in a formatted string. Default: 2.
- • indent (int): determines the size of indentation in a formatted string. Default: 4.
- • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
- • normalize_functions (str): normalize function names, "upper", "lower", or None Default: "upper"
- • alias_post_tablesample (bool): if the table alias comes after tablesample Default: False
- • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit Default: False
- • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
+ • pretty: Whether or not to format the produced SQL string. Default: False.
+ • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
+ • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
+ • pad: Determines the pad size in a formatted string. Default: 2.
+ • indent: Determines the indentation size in a formatted string. Default: 2.
+ • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
+ • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
+ • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- • leading_comma (bool): if the the comma is leading or trailing in select statements Default: False
+ • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
@@ -756,11 +729,11 @@ Default: True
    -
    135        def cast_sql(self, expression: exp.Cast) -> str:
    -136            if expression.is_type("date"):
    -137                return self.func("DATE", expression.this)
    -138
    -139            return super().cast_sql(expression)
    +            
    134        def cast_sql(self, expression: exp.Cast) -> str:
    +135            if expression.is_type("date"):
    +136                return self.func("DATE", expression.this)
    +137
    +138            return super().cast_sql(expression)
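A small sketch of the behaviour implemented by cast_sql above, assuming the default dialect on the read side:

    import sqlglot

    # Casts to DATE are rendered with SQLite's DATE() function.
    print(sqlglot.transpile("SELECT CAST(x AS DATE) FROM t", write="sqlite")[0])
    # Expected output, roughly: SELECT DATE(x) FROM t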
     
    @@ -778,32 +751,32 @@ Default: True
    -
    141        def datediff_sql(self, expression: exp.DateDiff) -> str:
    -142            unit = expression.args.get("unit")
    -143            unit = unit.name.upper() if unit else "DAY"
    -144
    -145            sql = f"(JULIANDAY({self.sql(expression, 'this')}) - JULIANDAY({self.sql(expression, 'expression')}))"
    -146
    -147            if unit == "MONTH":
    -148                sql = f"{sql} / 30.0"
    -149            elif unit == "YEAR":
    -150                sql = f"{sql} / 365.0"
    -151            elif unit == "HOUR":
    -152                sql = f"{sql} * 24.0"
    -153            elif unit == "MINUTE":
    -154                sql = f"{sql} * 1440.0"
    -155            elif unit == "SECOND":
    -156                sql = f"{sql} * 86400.0"
    -157            elif unit == "MILLISECOND":
    -158                sql = f"{sql} * 86400000.0"
    -159            elif unit == "MICROSECOND":
    -160                sql = f"{sql} * 86400000000.0"
    -161            elif unit == "NANOSECOND":
    -162                sql = f"{sql} * 8640000000000.0"
    -163            else:
    -164                self.unsupported("DATEDIFF unsupported for '{unit}'.")
    -165
    -166            return f"CAST({sql} AS INTEGER)"
    +            
    140        def datediff_sql(self, expression: exp.DateDiff) -> str:
    +141            unit = expression.args.get("unit")
    +142            unit = unit.name.upper() if unit else "DAY"
    +143
    +144            sql = f"(JULIANDAY({self.sql(expression, 'this')}) - JULIANDAY({self.sql(expression, 'expression')}))"
    +145
    +146            if unit == "MONTH":
    +147                sql = f"{sql} / 30.0"
    +148            elif unit == "YEAR":
    +149                sql = f"{sql} / 365.0"
    +150            elif unit == "HOUR":
    +151                sql = f"{sql} * 24.0"
    +152            elif unit == "MINUTE":
    +153                sql = f"{sql} * 1440.0"
    +154            elif unit == "SECOND":
    +155                sql = f"{sql} * 86400.0"
    +156            elif unit == "MILLISECOND":
    +157                sql = f"{sql} * 86400000.0"
    +158            elif unit == "MICROSECOND":
    +159                sql = f"{sql} * 86400000000.0"
    +160            elif unit == "NANOSECOND":
    +161                sql = f"{sql} * 8640000000000.0"
    +162            else:
    +163                self.unsupported("DATEDIFF unsupported for '{unit}'.")
    +164
    +165            return f"CAST({sql} AS INTEGER)"
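A hedged sketch of what datediff_sql produces, assuming DATEDIFF(a, b) parses to exp.DateDiff with the default read dialect (no unit, so the DAY branch applies):

    import sqlglot

    # DATEDIFF is emulated via JULIANDAY arithmetic and cast back to INTEGER.
    print(sqlglot.transpile("SELECT DATEDIFF(a, b) FROM t", write="sqlite")[0])
    # Expected output, roughly:
    # SELECT CAST((JULIANDAY(a) - JULIANDAY(b)) AS INTEGER) FROM t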
     
    @@ -821,23 +794,23 @@ Default: True
    -
    169        def groupconcat_sql(self, expression: exp.GroupConcat) -> str:
    -170            this = expression.this
    -171            distinct = expression.find(exp.Distinct)
    -172
    -173            if distinct:
    -174                this = distinct.expressions[0]
    -175                distinct_sql = "DISTINCT "
    -176            else:
    -177                distinct_sql = ""
    -178
    -179            if isinstance(expression.this, exp.Order):
    -180                self.unsupported("SQLite GROUP_CONCAT doesn't support ORDER BY.")
    -181                if expression.this.this and not distinct:
    -182                    this = expression.this.this
    -183
    -184            separator = expression.args.get("separator")
    -185            return f"GROUP_CONCAT({distinct_sql}{self.format_args(this, separator)})"
    +            
    168        def groupconcat_sql(self, expression: exp.GroupConcat) -> str:
    +169            this = expression.this
    +170            distinct = expression.find(exp.Distinct)
    +171
    +172            if distinct:
    +173                this = distinct.expressions[0]
    +174                distinct_sql = "DISTINCT "
    +175            else:
    +176                distinct_sql = ""
    +177
    +178            if isinstance(expression.this, exp.Order):
    +179                self.unsupported("SQLite GROUP_CONCAT doesn't support ORDER BY.")
    +180                if expression.this.this and not distinct:
    +181                    this = expression.this.this
    +182
    +183            separator = expression.args.get("separator")
    +184            return f"GROUP_CONCAT({distinct_sql}{self.format_args(this, separator)})"
     
    @@ -855,11 +828,11 @@ Default: True
    -
    187        def least_sql(self, expression: exp.Least) -> str:
    -188            if len(expression.expressions) > 1:
    -189                return rename_func("MIN")(self, expression)
    -190
    -191            return self.expressions(expression)
    +            
    186        def least_sql(self, expression: exp.Least) -> str:
    +187            if len(expression.expressions) > 1:
    +188                return rename_func("MIN")(self, expression)
    +189
    +190            return self.expressions(expression)
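For illustration (a sketch, not part of the patch), LEAST with more than one trailing argument is renamed to SQLite's variadic MIN:

    import sqlglot

    # LEAST(a, b, c) has more than one expression after `this`, so it becomes MIN(a, b, c).
    print(sqlglot.transpile("SELECT LEAST(a, b, c) FROM t", write="sqlite")[0])
    # Expected output, roughly: SELECT MIN(a, b, c) FROM t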
     
    @@ -877,10 +850,10 @@ Default: True
    -
    193        def transaction_sql(self, expression: exp.Transaction) -> str:
    -194            this = expression.this
    -195            this = f" {this}" if this else ""
    -196            return f"BEGIN{this} TRANSACTION"
    +            
    192        def transaction_sql(self, expression: exp.Transaction) -> str:
    +193            this = expression.this
    +194            this = f" {this}" if this else ""
    +195            return f"BEGIN{this} TRANSACTION"
     
    @@ -916,6 +889,7 @@ Default: True
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +
    createable_sql
    create_sql
    clone_sql
    describe_sql
    @@ -998,10 +972,12 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_having_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -1026,7 +1002,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    +
    safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -1075,6 +1051,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -1123,6 +1100,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
    diff --git a/docs/sqlglot/dialects/starrocks.html b/docs/sqlglot/dialects/starrocks.html index 40b4e05..a6c391d 100644 --- a/docs/sqlglot/dialects/starrocks.html +++ b/docs/sqlglot/dialects/starrocks.html @@ -214,27 +214,19 @@
    -

    Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces -a parsed syntax tree.

    +

    Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

    Arguments:
      -
    • error_level: the desired error level. +
    • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
    • -
    • error_message_context: determines the amount of context to capture from a +
    • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). -Default: 50.
    • -
    • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. -Default: 0
    • -
    • alias_post_tablesample: If the table alias comes after tablesample. -Default: False
    • +Default: 100
    • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
    • -
    • null_ordering: Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
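A minimal sketch of how these parser options can be supplied, assuming sqlglot.parse_one forwards keyword arguments such as error_level and max_errors to the Parser:

    import sqlglot
    from sqlglot.errors import ErrorLevel

    # Assumption: parse_one() forwards these keyword arguments to the Parser.
    ast = sqlglot.parse_one(
        "SELECT a FROM t",
        read="starrocks",
        error_level=ErrorLevel.RAISE,  # raise ParseError on invalid input
        max_errors=3,                  # include at most 3 messages in the error
    )
    print(repr(ast))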
    @@ -295,49 +287,34 @@ Default: "nulls_are_small"
    -

    Generator interprets the given syntax tree and produces a SQL string as an output.

    +

    Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
      -
    • time_mapping (dict): the dictionary of custom time mappings in which the key -represents a python time format and the output the target time format
    • -
    • time_trie (trie): a trie of the time_mapping keys
    • -
    • pretty (bool): if set to True the returned string will be formatted. Default: False.
    • -
    • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    • -
    • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    • -
    • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    • -
    • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    • -
    • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    • -
    • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    • -
    • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    • -
    • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    • -
    • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    • -
    • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    • -
    • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    • -
    • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    • -
    • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    • -
    • normalize (bool): if set to True all identifiers will lower cased
    • -
    • string_escape (str): specifies a string escape character. Default: '.
    • -
    • identifier_escape (str): specifies an identifier escape character. Default: ".
    • -
    • pad (int): determines padding in a formatted string. Default: 2.
    • -
    • indent (int): determines the size of indentation in a formatted string. Default: 4.
    • -
    • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    • -
    • normalize_functions (str): normalize function names, "upper", "lower", or None -Default: "upper"
    • -
    • alias_post_tablesample (bool): if the table alias comes after tablesample -Default: False
    • -
    • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit -Default: False
    • -
    • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters -unsupported expressions. Default ErrorLevel.WARN.
    • -
    • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
    • -
    • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +
    • pretty: Whether or not to format the produced SQL string. +Default: False.
    • +
    • identify: Determines when an identifier should be quoted. Possible values are: +False (default): Never quote, except in cases where it's mandatory by the dialect. +True or 'always': Always quote. +'safe': Only quote identifiers that are case insensitive.
    • +
    • normalize: Whether or not to normalize identifiers to lowercase. +Default: False.
    • +
    • pad: Determines the pad size in a formatted string. +Default: 2.
    • +
    • indent: Determines the indentation size in a formatted string. +Default: 2.
    • +
    • normalize_functions: Whether or not to normalize all function names. Possible values are: +"upper" or True (default): Convert names to uppercase. +"lower": Convert names to lowercase. +False: Disables function name normalization.
    • +
    • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. +Default ErrorLevel.WARN.
    • +
    • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    • -
    • leading_comma (bool): if the the comma is leading or trailing in select statements +
    • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. +This is only relevant when generating in pretty mode. Default: False
    • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true @@ -378,6 +355,7 @@ Default: True
    • notnullcolumnconstraint_sql
      primarykeycolumnconstraint_sql
      uniquecolumnconstraint_sql
      +
      createable_sql
      create_sql
      clone_sql
      describe_sql
      @@ -460,10 +438,12 @@ Default: True
      ordered_sql
      matchrecognize_sql
      query_modifiers
      +
      offset_limit_modifiers
      after_having_modifiers
      after_limit_modifiers
      select_sql
      schema_sql
      +
      schema_columns_sql
      star_sql
      parameter_sql
      sessionparameter_sql
      @@ -488,7 +468,7 @@ Default: True
      nextvaluefor_sql
      extract_sql
      trim_sql
      -
      concat_sql
      +
      safeconcat_sql
      check_sql
      foreignkey_sql
      primarykey_sql
      @@ -539,6 +519,7 @@ Default: True
      respectnulls_sql
      intdiv_sql
      dpipe_sql
      +
      safedpipe_sql
      div_sql
      overlaps_sql
      distance_sql
      @@ -587,6 +568,7 @@ Default: True
      dictproperty_sql
      dictrange_sql
      dictsubproperty_sql
      +
      oncluster_sql
    sqlglot.dialects.mysql.MySQL.Generator
    diff --git a/docs/sqlglot/dialects/tableau.html b/docs/sqlglot/dialects/tableau.html index 0e99348..b30f96d 100644 --- a/docs/sqlglot/dialects/tableau.html +++ b/docs/sqlglot/dialects/tableau.html @@ -39,9 +39,6 @@
  • if_sql
  • -
  • - coalesce_sql -
  • count_sql
  • @@ -83,7 +80,7 @@
     1from __future__ import annotations
      2
      3from sqlglot import exp, generator, parser, transforms
    - 4from sqlglot.dialects.dialect import Dialect
    + 4from sqlglot.dialects.dialect import Dialect, rename_func
      5
      6
      7class Tableau(Dialect):
    @@ -93,34 +90,32 @@
     11
     12        TRANSFORMS = {
     13            **generator.Generator.TRANSFORMS,
    -14            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -15        }
    -16
    -17        PROPERTIES_LOCATION = {
    -18            **generator.Generator.PROPERTIES_LOCATION,
    -19            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -20        }
    -21
    -22        def if_sql(self, expression: exp.If) -> str:
    -23            this = self.sql(expression, "this")
    -24            true = self.sql(expression, "true")
    -25            false = self.sql(expression, "false")
    -26            return f"IF {this} THEN {true} ELSE {false} END"
    -27
    -28        def coalesce_sql(self, expression: exp.Coalesce) -> str:
    -29            return f"IFNULL({self.sql(expression, 'this')}, {self.expressions(expression)})"
    -30
    -31        def count_sql(self, expression: exp.Count) -> str:
    -32            this = expression.this
    -33            if isinstance(this, exp.Distinct):
    -34                return f"COUNTD({self.expressions(this, flat=True)})"
    -35            return f"COUNT({self.sql(expression, 'this')})"
    -36
    -37    class Parser(parser.Parser):
    -38        FUNCTIONS = {
    -39            **parser.Parser.FUNCTIONS,
    -40            "COUNTD": lambda args: exp.Count(this=exp.Distinct(expressions=args)),
    -41        }
    +14            exp.Coalesce: rename_func("IFNULL"),
    +15            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +16        }
    +17
    +18        PROPERTIES_LOCATION = {
    +19            **generator.Generator.PROPERTIES_LOCATION,
    +20            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +21        }
    +22
    +23        def if_sql(self, expression: exp.If) -> str:
    +24            this = self.sql(expression, "this")
    +25            true = self.sql(expression, "true")
    +26            false = self.sql(expression, "false")
    +27            return f"IF {this} THEN {true} ELSE {false} END"
    +28
    +29        def count_sql(self, expression: exp.Count) -> str:
    +30            this = expression.this
    +31            if isinstance(this, exp.Distinct):
    +32                return f"COUNTD({self.expressions(this, flat=True)})"
    +33            return f"COUNT({self.sql(expression, 'this')})"
    +34
    +35    class Parser(parser.Parser):
    +36        FUNCTIONS = {
    +37            **parser.Parser.FUNCTIONS,
    +38            "COUNTD": lambda args: exp.Count(this=exp.Distinct(expressions=args)),
    +39        }
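A short sketch of the effect of the changes above (the exp.Coalesce transform plus count_sql and the COUNTD parser entry), assuming the default dialect on the read side:

    import sqlglot

    # COALESCE is rewritten to IFNULL and COUNT(DISTINCT ...) to COUNTD.
    print(sqlglot.transpile("SELECT COALESCE(a, b), COUNT(DISTINCT c) FROM t", write="tableau")[0])
    # Expected output, roughly: SELECT IFNULL(a, b), COUNTD(c) FROM t

    # Reading Tableau SQL maps COUNTD back to a COUNT over DISTINCT.
    print(sqlglot.parse_one("SELECT COUNTD(c) FROM t", read="tableau").sql())
    # Expected output, roughly: SELECT COUNT(DISTINCT c) FROM t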
     
    -

    Generator interprets the given syntax tree and produces a SQL string as an output.

    +

    Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
      -
    • time_mapping (dict): the dictionary of custom time mappings in which the key -represents a python time format and the output the target time format
    • -
    • time_trie (trie): a trie of the time_mapping keys
    • -
    • pretty (bool): if set to True the returned string will be formatted. Default: False.
    • -
    • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    • -
    • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    • -
    • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    • -
    • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    • -
    • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    • -
    • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    • -
    • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    • -
    • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    • -
    • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    • -
    • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    • -
    • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    • -
    • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    • -
    • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    • -
    • normalize (bool): if set to True all identifiers will lower cased
    • -
    • string_escape (str): specifies a string escape character. Default: '.
    • -
    • identifier_escape (str): specifies an identifier escape character. Default: ".
    • -
    • pad (int): determines padding in a formatted string. Default: 2.
    • -
    • indent (int): determines the size of indentation in a formatted string. Default: 4.
    • -
    • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    • -
    • normalize_functions (str): normalize function names, "upper", "lower", or None -Default: "upper"
    • -
    • alias_post_tablesample (bool): if the table alias comes after tablesample -Default: False
    • -
    • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit -Default: False
    • -
    • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters -unsupported expressions. Default ErrorLevel.WARN.
    • -
    • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
    • -
    • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +
    • pretty: Whether or not to format the produced SQL string. +Default: False.
    • +
    • identify: Determines when an identifier should be quoted. Possible values are: +False (default): Never quote, except in cases where it's mandatory by the dialect. +True or 'always': Always quote. +'safe': Only quote identifiers that are case insensitive.
    • +
    • normalize: Whether or not to normalize identifiers to lowercase. +Default: False.
    • +
    • pad: Determines the pad size in a formatted string. +Default: 2.
    • +
    • indent: Determines the indentation size in a formatted string. +Default: 2.
    • +
    • normalize_functions: Whether or not to normalize all function names. Possible values are: +"upper" or True (default): Convert names to uppercase. +"lower": Convert names to lowercase. +False: Disables function name normalization.
    • +
    • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. +Default ErrorLevel.WARN.
    • +
    • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    • -
    • leading_comma (bool): if the the comma is leading or trailing in select statements +
    • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. +This is only relevant when generating in pretty mode. Default: False
    • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true @@ -301,30 +277,11 @@ Default: True
    -
    23        def if_sql(self, expression: exp.If) -> str:
    -24            this = self.sql(expression, "this")
    -25            true = self.sql(expression, "true")
    -26            false = self.sql(expression, "false")
    -27            return f"IF {this} THEN {true} ELSE {false} END"
    -
- def coalesce_sql(self, expression: sqlglot.expressions.Coalesce) -> str:
    29        def coalesce_sql(self, expression: exp.Coalesce) -> str:
    -30            return f"IFNULL({self.sql(expression, 'this')}, {self.expressions(expression)})"
    +            
    24        def if_sql(self, expression: exp.If) -> str:
    +25            this = self.sql(expression, "this")
    +26            true = self.sql(expression, "true")
    +27            false = self.sql(expression, "false")
    +28            return f"IF {this} THEN {true} ELSE {false} END"
     
    @@ -342,11 +299,11 @@ Default: True
    -
    32        def count_sql(self, expression: exp.Count) -> str:
    -33            this = expression.this
    -34            if isinstance(this, exp.Distinct):
    -35                return f"COUNTD({self.expressions(this, flat=True)})"
    -36            return f"COUNT({self.sql(expression, 'this')})"
    +            
    30        def count_sql(self, expression: exp.Count) -> str:
    +31            this = expression.this
    +32            if isinstance(this, exp.Distinct):
    +33                return f"COUNTD({self.expressions(this, flat=True)})"
    +34            return f"COUNT({self.sql(expression, 'this')})"
     
    @@ -382,6 +339,7 @@ Default: True
    notnullcolumnconstraint_sql
    primarykeycolumnconstraint_sql
    uniquecolumnconstraint_sql
    +
    createable_sql
    create_sql
    clone_sql
    describe_sql
    @@ -464,10 +422,12 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_having_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -492,7 +452,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    +
    safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -542,6 +502,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -590,6 +551,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
    @@ -606,35 +568,27 @@ Default: True
    -
    38    class Parser(parser.Parser):
    -39        FUNCTIONS = {
    -40            **parser.Parser.FUNCTIONS,
    -41            "COUNTD": lambda args: exp.Count(this=exp.Distinct(expressions=args)),
    -42        }
    +            
    36    class Parser(parser.Parser):
    +37        FUNCTIONS = {
    +38            **parser.Parser.FUNCTIONS,
    +39            "COUNTD": lambda args: exp.Count(this=exp.Distinct(expressions=args)),
    +40        }
     
    -

    Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces -a parsed syntax tree.

    +

    Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

    Arguments:
      -
    • error_level: the desired error level. +
    • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
    • -
    • error_message_context: determines the amount of context to capture from a +
    • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). -Default: 50.
    • -
    • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. -Default: 0
    • -
    • alias_post_tablesample: If the table alias comes after tablesample. -Default: False
    • +Default: 100
    • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
    • -
    • null_ordering: Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
    diff --git a/docs/sqlglot/dialects/teradata.html b/docs/sqlglot/dialects/teradata.html index e4f7333..3d0f933 100644 --- a/docs/sqlglot/dialects/teradata.html +++ b/docs/sqlglot/dialects/teradata.html @@ -63,6 +63,9 @@
  • rangen_sql
  • +
  • + createable_sql +
  • @@ -94,189 +97,212 @@
      1from __future__ import annotations
       2
    -  3import typing as t
    -  4
    -  5from sqlglot import exp, generator, parser, tokens, transforms
    -  6from sqlglot.dialects.dialect import (
    -  7    Dialect,
    -  8    format_time_lambda,
    -  9    max_or_greatest,
    - 10    min_or_least,
    - 11)
    - 12from sqlglot.tokens import TokenType
    - 13
    - 14
    - 15class Teradata(Dialect):
    - 16    class Tokenizer(tokens.Tokenizer):
    - 17        # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
    - 18        KEYWORDS = {
    - 19            **tokens.Tokenizer.KEYWORDS,
    - 20            "BYTEINT": TokenType.SMALLINT,
    - 21            "SEL": TokenType.SELECT,
    - 22            "INS": TokenType.INSERT,
    - 23            "MOD": TokenType.MOD,
    - 24            "LT": TokenType.LT,
    - 25            "LE": TokenType.LTE,
    - 26            "GT": TokenType.GT,
    - 27            "GE": TokenType.GTE,
    - 28            "^=": TokenType.NEQ,
    - 29            "NE": TokenType.NEQ,
    - 30            "NOT=": TokenType.NEQ,
    - 31            "ST_GEOMETRY": TokenType.GEOMETRY,
    - 32        }
    - 33
    - 34        # teradata does not support % for modulus
    - 35        SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
    - 36        SINGLE_TOKENS.pop("%")
    - 37
    - 38    class Parser(parser.Parser):
    - 39        CHARSET_TRANSLATORS = {
    - 40            "GRAPHIC_TO_KANJISJIS",
    - 41            "GRAPHIC_TO_LATIN",
    - 42            "GRAPHIC_TO_UNICODE",
    - 43            "GRAPHIC_TO_UNICODE_PadSpace",
    - 44            "KANJI1_KanjiEBCDIC_TO_UNICODE",
    - 45            "KANJI1_KanjiEUC_TO_UNICODE",
    - 46            "KANJI1_KANJISJIS_TO_UNICODE",
    - 47            "KANJI1_SBC_TO_UNICODE",
    - 48            "KANJISJIS_TO_GRAPHIC",
    - 49            "KANJISJIS_TO_LATIN",
    - 50            "KANJISJIS_TO_UNICODE",
    - 51            "LATIN_TO_GRAPHIC",
    - 52            "LATIN_TO_KANJISJIS",
    - 53            "LATIN_TO_UNICODE",
    - 54            "LOCALE_TO_UNICODE",
    - 55            "UNICODE_TO_GRAPHIC",
    - 56            "UNICODE_TO_GRAPHIC_PadGraphic",
    - 57            "UNICODE_TO_GRAPHIC_VarGraphic",
    - 58            "UNICODE_TO_KANJI1_KanjiEBCDIC",
    - 59            "UNICODE_TO_KANJI1_KanjiEUC",
    - 60            "UNICODE_TO_KANJI1_KANJISJIS",
    - 61            "UNICODE_TO_KANJI1_SBC",
    - 62            "UNICODE_TO_KANJISJIS",
    - 63            "UNICODE_TO_LATIN",
    - 64            "UNICODE_TO_LOCALE",
    - 65            "UNICODE_TO_UNICODE_FoldSpace",
    - 66            "UNICODE_TO_UNICODE_Fullwidth",
    - 67            "UNICODE_TO_UNICODE_Halfwidth",
    - 68            "UNICODE_TO_UNICODE_NFC",
    - 69            "UNICODE_TO_UNICODE_NFD",
    - 70            "UNICODE_TO_UNICODE_NFKC",
    - 71            "UNICODE_TO_UNICODE_NFKD",
    - 72        }
    - 73
    - 74        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
    - 75        FUNC_TOKENS.remove(TokenType.REPLACE)
    - 76
    - 77        STATEMENT_PARSERS = {
    - 78            **parser.Parser.STATEMENT_PARSERS,
    - 79            TokenType.REPLACE: lambda self: self._parse_create(),
    - 80        }
    - 81
    - 82        FUNCTION_PARSERS = {
    - 83            **parser.Parser.FUNCTION_PARSERS,
    - 84            "RANGE_N": lambda self: self._parse_rangen(),
    - 85            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
    +  3from sqlglot import exp, generator, parser, tokens, transforms
    +  4from sqlglot.dialects.dialect import Dialect, max_or_greatest, min_or_least
    +  5from sqlglot.tokens import TokenType
    +  6
    +  7
    +  8class Teradata(Dialect):
    +  9    TIME_MAPPING = {
    + 10        "Y": "%Y",
    + 11        "YYYY": "%Y",
    + 12        "YY": "%y",
    + 13        "MMMM": "%B",
    + 14        "MMM": "%b",
    + 15        "DD": "%d",
    + 16        "D": "%-d",
    + 17        "HH": "%H",
    + 18        "H": "%-H",
    + 19        "MM": "%M",
    + 20        "M": "%-M",
    + 21        "SS": "%S",
    + 22        "S": "%-S",
    + 23        "SSSSSS": "%f",
    + 24        "E": "%a",
    + 25        "EE": "%a",
    + 26        "EEE": "%a",
    + 27        "EEEE": "%A",
    + 28    }
    + 29
    + 30    class Tokenizer(tokens.Tokenizer):
    + 31        # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
    + 32        KEYWORDS = {
    + 33            **tokens.Tokenizer.KEYWORDS,
    + 34            "BYTEINT": TokenType.SMALLINT,
    + 35            "SEL": TokenType.SELECT,
    + 36            "INS": TokenType.INSERT,
    + 37            "MOD": TokenType.MOD,
    + 38            "LT": TokenType.LT,
    + 39            "LE": TokenType.LTE,
    + 40            "GT": TokenType.GT,
    + 41            "GE": TokenType.GTE,
    + 42            "^=": TokenType.NEQ,
    + 43            "NE": TokenType.NEQ,
    + 44            "NOT=": TokenType.NEQ,
    + 45            "ST_GEOMETRY": TokenType.GEOMETRY,
    + 46        }
    + 47
    + 48        # Teradata does not support % as a modulo operator
    + 49        SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
    + 50        SINGLE_TOKENS.pop("%")
    + 51
    + 52    class Parser(parser.Parser):
    + 53        CHARSET_TRANSLATORS = {
    + 54            "GRAPHIC_TO_KANJISJIS",
    + 55            "GRAPHIC_TO_LATIN",
    + 56            "GRAPHIC_TO_UNICODE",
    + 57            "GRAPHIC_TO_UNICODE_PadSpace",
    + 58            "KANJI1_KanjiEBCDIC_TO_UNICODE",
    + 59            "KANJI1_KanjiEUC_TO_UNICODE",
    + 60            "KANJI1_KANJISJIS_TO_UNICODE",
    + 61            "KANJI1_SBC_TO_UNICODE",
    + 62            "KANJISJIS_TO_GRAPHIC",
    + 63            "KANJISJIS_TO_LATIN",
    + 64            "KANJISJIS_TO_UNICODE",
    + 65            "LATIN_TO_GRAPHIC",
    + 66            "LATIN_TO_KANJISJIS",
    + 67            "LATIN_TO_UNICODE",
    + 68            "LOCALE_TO_UNICODE",
    + 69            "UNICODE_TO_GRAPHIC",
    + 70            "UNICODE_TO_GRAPHIC_PadGraphic",
    + 71            "UNICODE_TO_GRAPHIC_VarGraphic",
    + 72            "UNICODE_TO_KANJI1_KanjiEBCDIC",
    + 73            "UNICODE_TO_KANJI1_KanjiEUC",
    + 74            "UNICODE_TO_KANJI1_KANJISJIS",
    + 75            "UNICODE_TO_KANJI1_SBC",
    + 76            "UNICODE_TO_KANJISJIS",
    + 77            "UNICODE_TO_LATIN",
    + 78            "UNICODE_TO_LOCALE",
    + 79            "UNICODE_TO_UNICODE_FoldSpace",
    + 80            "UNICODE_TO_UNICODE_Fullwidth",
    + 81            "UNICODE_TO_UNICODE_Halfwidth",
    + 82            "UNICODE_TO_UNICODE_NFC",
    + 83            "UNICODE_TO_UNICODE_NFD",
    + 84            "UNICODE_TO_UNICODE_NFKC",
    + 85            "UNICODE_TO_UNICODE_NFKD",
      86        }
      87
    - 88        def _parse_translate(self, strict: bool) -> exp.Expression:
    - 89            this = self._parse_conjunction()
    + 88        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
    + 89        FUNC_TOKENS.remove(TokenType.REPLACE)
      90
    - 91            if not self._match(TokenType.USING):
    - 92                self.raise_error("Expected USING in TRANSLATE")
    - 93
    - 94            if self._match_texts(self.CHARSET_TRANSLATORS):
    - 95                charset_split = self._prev.text.split("_TO_")
    - 96                to = self.expression(exp.CharacterSet, this=charset_split[1])
    - 97            else:
    - 98                self.raise_error("Expected a character set translator after USING in TRANSLATE")
    - 99
    -100            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
    + 91        STATEMENT_PARSERS = {
    + 92            **parser.Parser.STATEMENT_PARSERS,
    + 93            TokenType.REPLACE: lambda self: self._parse_create(),
    + 94        }
    + 95
    + 96        FUNCTION_PARSERS = {
    + 97            **parser.Parser.FUNCTION_PARSERS,
    + 98            "RANGE_N": lambda self: self._parse_rangen(),
    + 99            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
    +100        }
     101
    -102        # FROM before SET in Teradata UPDATE syntax
    -103        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    -104        def _parse_update(self) -> exp.Expression:
    -105            return self.expression(
    -106                exp.Update,
    -107                **{  # type: ignore
    -108                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
    -109                    "from": self._parse_from(modifiers=True),
    -110                    "expressions": self._match(TokenType.SET)
    -111                    and self._parse_csv(self._parse_equality),
    -112                    "where": self._parse_where(),
    -113                },
    -114            )
    +102        def _parse_translate(self, strict: bool) -> exp.Expression:
    +103            this = self._parse_conjunction()
    +104
    +105            if not self._match(TokenType.USING):
    +106                self.raise_error("Expected USING in TRANSLATE")
    +107
    +108            if self._match_texts(self.CHARSET_TRANSLATORS):
    +109                charset_split = self._prev.text.split("_TO_")
    +110                to = self.expression(exp.CharacterSet, this=charset_split[1])
    +111            else:
    +112                self.raise_error("Expected a character set translator after USING in TRANSLATE")
    +113
    +114            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
     115
    -116        def _parse_rangen(self):
    -117            this = self._parse_id_var()
    -118            self._match(TokenType.BETWEEN)
    -119
    -120            expressions = self._parse_csv(self._parse_conjunction)
    -121            each = self._match_text_seq("EACH") and self._parse_conjunction()
    -122
    -123            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)
    -124
    -125        def _parse_cast(self, strict: bool) -> exp.Expression:
    -126            cast = t.cast(exp.Cast, super()._parse_cast(strict))
    -127            if cast.to.this == exp.DataType.Type.DATE and self._match(TokenType.FORMAT):
    -128                return format_time_lambda(exp.TimeToStr, "teradata")(
    -129                    [cast.this, self._parse_string()]
    -130                )
    -131            return cast
    -132
    -133    class Generator(generator.Generator):
    -134        JOIN_HINTS = False
    -135        TABLE_HINTS = False
    +116        # FROM before SET in Teradata UPDATE syntax
    +117        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    +118        def _parse_update(self) -> exp.Update:
    +119            return self.expression(
    +120                exp.Update,
    +121                **{  # type: ignore
    +122                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
    +123                    "from": self._parse_from(modifiers=True),
    +124                    "expressions": self._match(TokenType.SET)
    +125                    and self._parse_csv(self._parse_equality),
    +126                    "where": self._parse_where(),
    +127                },
    +128            )
    +129
    +130        def _parse_rangen(self):
    +131            this = self._parse_id_var()
    +132            self._match(TokenType.BETWEEN)
    +133
    +134            expressions = self._parse_csv(self._parse_conjunction)
    +135            each = self._match_text_seq("EACH") and self._parse_conjunction()
     136
    -137        TYPE_MAPPING = {
    -138            **generator.Generator.TYPE_MAPPING,
    -139            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
    -140        }
    -141
    -142        PROPERTIES_LOCATION = {
    -143            **generator.Generator.PROPERTIES_LOCATION,
    -144            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
    -145            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
    -146            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
    -147        }
    -148
    -149        TRANSFORMS = {
    -150            **generator.Generator.TRANSFORMS,
    -151            exp.Max: max_or_greatest,
    -152            exp.Min: min_or_least,
    -153            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -154            exp.TimeToStr: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
    -155            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -156        }
    -157
    -158        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    -159            return f"PARTITION BY {self.sql(expression, 'this')}"
    -160
    -161        # FROM before SET in Teradata UPDATE syntax
    -162        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    -163        def update_sql(self, expression: exp.Update) -> str:
    -164            this = self.sql(expression, "this")
    -165            from_sql = self.sql(expression, "from")
    -166            set_sql = self.expressions(expression, flat=True)
    -167            where_sql = self.sql(expression, "where")
    -168            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    -169            return self.prepend_ctes(expression, sql)
    -170
    -171        def mod_sql(self, expression: exp.Mod) -> str:
    -172            return self.binary(expression, "MOD")
    -173
    -174        def datatype_sql(self, expression: exp.DataType) -> str:
    -175            type_sql = super().datatype_sql(expression)
    -176            prefix_sql = expression.args.get("prefix")
    -177            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
    -178
    -179        def rangen_sql(self, expression: exp.RangeN) -> str:
    -180            this = self.sql(expression, "this")
    -181            expressions_sql = self.expressions(expression)
    -182            each_sql = self.sql(expression, "each")
    -183            each_sql = f" EACH {each_sql}" if each_sql else ""
    +137            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)
    +138
    +139    class Generator(generator.Generator):
    +140        JOIN_HINTS = False
    +141        TABLE_HINTS = False
    +142
    +143        TYPE_MAPPING = {
    +144            **generator.Generator.TYPE_MAPPING,
    +145            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
    +146        }
    +147
    +148        PROPERTIES_LOCATION = {
    +149            **generator.Generator.PROPERTIES_LOCATION,
    +150            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
    +151            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
    +152            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
    +153        }
    +154
    +155        TRANSFORMS = {
    +156            **generator.Generator.TRANSFORMS,
    +157            exp.Max: max_or_greatest,
    +158            exp.Min: min_or_least,
    +159            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +160            exp.StrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
    +161            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +162        }
    +163
    +164        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    +165            return f"PARTITION BY {self.sql(expression, 'this')}"
    +166
    +167        # FROM before SET in Teradata UPDATE syntax
    +168        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    +169        def update_sql(self, expression: exp.Update) -> str:
    +170            this = self.sql(expression, "this")
    +171            from_sql = self.sql(expression, "from")
    +172            set_sql = self.expressions(expression, flat=True)
    +173            where_sql = self.sql(expression, "where")
    +174            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    +175            return self.prepend_ctes(expression, sql)
    +176
    +177        def mod_sql(self, expression: exp.Mod) -> str:
    +178            return self.binary(expression, "MOD")
    +179
    +180        def datatype_sql(self, expression: exp.DataType) -> str:
    +181            type_sql = super().datatype_sql(expression)
    +182            prefix_sql = expression.args.get("prefix")
    +183            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
     184
    -185            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    +185        def rangen_sql(self, expression: exp.RangeN) -> str:
    +186            this = self.sql(expression, "this")
    +187            expressions_sql = self.expressions(expression)
    +188            each_sql = self.sql(expression, "each")
    +189            each_sql = f" EACH {each_sql}" if each_sql else ""
    +190
    +191            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    +192
    +193        def createable_sql(
    +194            self,
    +195            expression: exp.Create,
    +196            locations: dict[exp.Properties.Location, list[exp.Property]],
    +197        ) -> str:
    +198            kind = self.sql(expression, "kind").upper()
    +199            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
    +200                this_name = self.sql(expression.this, "this")
    +201                this_properties = self.properties(
    +202                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
    +203                    wrapped=False,
    +204                    prefix=",",
    +205                )
    +206                this_schema = self.schema_columns_sql(expression.this)
    +207                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
    +208            return super().createable_sql(expression, locations)
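A brief sketch of a few of the behaviours shown above (the SEL keyword alias, the MOD operator, and the FROM-before-SET UPDATE form); the exact output strings are assumptions, not taken from the patch:

    import sqlglot

    # SEL is tokenized as SELECT.
    print(sqlglot.transpile("SEL x FROM t", read="teradata")[0])
    # Expected output, roughly: SELECT x FROM t

    # % from other dialects is emitted as MOD, since Teradata lacks the % operator.
    print(sqlglot.transpile("SELECT x % 2 FROM t", read="mysql", write="teradata")[0])
    # Expected output, roughly: SELECT x MOD 2 FROM t

    # Teradata places FROM before SET in UPDATE; _parse_update/update_sql round-trip it.
    print(sqlglot.transpile("UPDATE t FROM s SET x = 1 WHERE t.id = s.id", read="teradata", write="teradata")[0])
    # Expected output, roughly: UPDATE t FROM s SET x = 1 WHERE t.id = s.id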
     
    @@ -292,177 +318,207 @@
    -
     16class Teradata(Dialect):
    - 17    class Tokenizer(tokens.Tokenizer):
    - 18        # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
    - 19        KEYWORDS = {
    - 20            **tokens.Tokenizer.KEYWORDS,
    - 21            "BYTEINT": TokenType.SMALLINT,
    - 22            "SEL": TokenType.SELECT,
    - 23            "INS": TokenType.INSERT,
    - 24            "MOD": TokenType.MOD,
    - 25            "LT": TokenType.LT,
    - 26            "LE": TokenType.LTE,
    - 27            "GT": TokenType.GT,
    - 28            "GE": TokenType.GTE,
    - 29            "^=": TokenType.NEQ,
    - 30            "NE": TokenType.NEQ,
    - 31            "NOT=": TokenType.NEQ,
    - 32            "ST_GEOMETRY": TokenType.GEOMETRY,
    - 33        }
    - 34
    - 35        # teradata does not support % for modulus
    - 36        SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
    - 37        SINGLE_TOKENS.pop("%")
    - 38
    - 39    class Parser(parser.Parser):
    - 40        CHARSET_TRANSLATORS = {
    - 41            "GRAPHIC_TO_KANJISJIS",
    - 42            "GRAPHIC_TO_LATIN",
    - 43            "GRAPHIC_TO_UNICODE",
    - 44            "GRAPHIC_TO_UNICODE_PadSpace",
    - 45            "KANJI1_KanjiEBCDIC_TO_UNICODE",
    - 46            "KANJI1_KanjiEUC_TO_UNICODE",
    - 47            "KANJI1_KANJISJIS_TO_UNICODE",
    - 48            "KANJI1_SBC_TO_UNICODE",
    - 49            "KANJISJIS_TO_GRAPHIC",
    - 50            "KANJISJIS_TO_LATIN",
    - 51            "KANJISJIS_TO_UNICODE",
    - 52            "LATIN_TO_GRAPHIC",
    - 53            "LATIN_TO_KANJISJIS",
    - 54            "LATIN_TO_UNICODE",
    - 55            "LOCALE_TO_UNICODE",
    - 56            "UNICODE_TO_GRAPHIC",
    - 57            "UNICODE_TO_GRAPHIC_PadGraphic",
    - 58            "UNICODE_TO_GRAPHIC_VarGraphic",
    - 59            "UNICODE_TO_KANJI1_KanjiEBCDIC",
    - 60            "UNICODE_TO_KANJI1_KanjiEUC",
    - 61            "UNICODE_TO_KANJI1_KANJISJIS",
    - 62            "UNICODE_TO_KANJI1_SBC",
    - 63            "UNICODE_TO_KANJISJIS",
    - 64            "UNICODE_TO_LATIN",
    - 65            "UNICODE_TO_LOCALE",
    - 66            "UNICODE_TO_UNICODE_FoldSpace",
    - 67            "UNICODE_TO_UNICODE_Fullwidth",
    - 68            "UNICODE_TO_UNICODE_Halfwidth",
    - 69            "UNICODE_TO_UNICODE_NFC",
    - 70            "UNICODE_TO_UNICODE_NFD",
    - 71            "UNICODE_TO_UNICODE_NFKC",
    - 72            "UNICODE_TO_UNICODE_NFKD",
    - 73        }
    - 74
    - 75        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
    - 76        FUNC_TOKENS.remove(TokenType.REPLACE)
    - 77
    - 78        STATEMENT_PARSERS = {
    - 79            **parser.Parser.STATEMENT_PARSERS,
    - 80            TokenType.REPLACE: lambda self: self._parse_create(),
    - 81        }
    - 82
    - 83        FUNCTION_PARSERS = {
    - 84            **parser.Parser.FUNCTION_PARSERS,
    - 85            "RANGE_N": lambda self: self._parse_rangen(),
    - 86            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
    +            
      9class Teradata(Dialect):
    + 10    TIME_MAPPING = {
    + 11        "Y": "%Y",
    + 12        "YYYY": "%Y",
    + 13        "YY": "%y",
    + 14        "MMMM": "%B",
    + 15        "MMM": "%b",
    + 16        "DD": "%d",
    + 17        "D": "%-d",
    + 18        "HH": "%H",
    + 19        "H": "%-H",
    + 20        "MM": "%M",
    + 21        "M": "%-M",
    + 22        "SS": "%S",
    + 23        "S": "%-S",
    + 24        "SSSSSS": "%f",
    + 25        "E": "%a",
    + 26        "EE": "%a",
    + 27        "EEE": "%a",
    + 28        "EEEE": "%A",
    + 29    }
    + 30
    + 31    class Tokenizer(tokens.Tokenizer):
    + 32        # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
    + 33        KEYWORDS = {
    + 34            **tokens.Tokenizer.KEYWORDS,
    + 35            "BYTEINT": TokenType.SMALLINT,
    + 36            "SEL": TokenType.SELECT,
    + 37            "INS": TokenType.INSERT,
    + 38            "MOD": TokenType.MOD,
    + 39            "LT": TokenType.LT,
    + 40            "LE": TokenType.LTE,
    + 41            "GT": TokenType.GT,
    + 42            "GE": TokenType.GTE,
    + 43            "^=": TokenType.NEQ,
    + 44            "NE": TokenType.NEQ,
    + 45            "NOT=": TokenType.NEQ,
    + 46            "ST_GEOMETRY": TokenType.GEOMETRY,
    + 47        }
    + 48
    + 49        # Teradata does not support % as a modulo operator
    + 50        SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
    + 51        SINGLE_TOKENS.pop("%")
    + 52
    + 53    class Parser(parser.Parser):
    + 54        CHARSET_TRANSLATORS = {
    + 55            "GRAPHIC_TO_KANJISJIS",
    + 56            "GRAPHIC_TO_LATIN",
    + 57            "GRAPHIC_TO_UNICODE",
    + 58            "GRAPHIC_TO_UNICODE_PadSpace",
    + 59            "KANJI1_KanjiEBCDIC_TO_UNICODE",
    + 60            "KANJI1_KanjiEUC_TO_UNICODE",
    + 61            "KANJI1_KANJISJIS_TO_UNICODE",
    + 62            "KANJI1_SBC_TO_UNICODE",
    + 63            "KANJISJIS_TO_GRAPHIC",
    + 64            "KANJISJIS_TO_LATIN",
    + 65            "KANJISJIS_TO_UNICODE",
    + 66            "LATIN_TO_GRAPHIC",
    + 67            "LATIN_TO_KANJISJIS",
    + 68            "LATIN_TO_UNICODE",
    + 69            "LOCALE_TO_UNICODE",
    + 70            "UNICODE_TO_GRAPHIC",
    + 71            "UNICODE_TO_GRAPHIC_PadGraphic",
    + 72            "UNICODE_TO_GRAPHIC_VarGraphic",
    + 73            "UNICODE_TO_KANJI1_KanjiEBCDIC",
    + 74            "UNICODE_TO_KANJI1_KanjiEUC",
    + 75            "UNICODE_TO_KANJI1_KANJISJIS",
    + 76            "UNICODE_TO_KANJI1_SBC",
    + 77            "UNICODE_TO_KANJISJIS",
    + 78            "UNICODE_TO_LATIN",
    + 79            "UNICODE_TO_LOCALE",
    + 80            "UNICODE_TO_UNICODE_FoldSpace",
    + 81            "UNICODE_TO_UNICODE_Fullwidth",
    + 82            "UNICODE_TO_UNICODE_Halfwidth",
    + 83            "UNICODE_TO_UNICODE_NFC",
    + 84            "UNICODE_TO_UNICODE_NFD",
    + 85            "UNICODE_TO_UNICODE_NFKC",
    + 86            "UNICODE_TO_UNICODE_NFKD",
      87        }
      88
    - 89        def _parse_translate(self, strict: bool) -> exp.Expression:
    - 90            this = self._parse_conjunction()
    + 89        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
    + 90        FUNC_TOKENS.remove(TokenType.REPLACE)
      91
    - 92            if not self._match(TokenType.USING):
    - 93                self.raise_error("Expected USING in TRANSLATE")
    - 94
    - 95            if self._match_texts(self.CHARSET_TRANSLATORS):
    - 96                charset_split = self._prev.text.split("_TO_")
    - 97                to = self.expression(exp.CharacterSet, this=charset_split[1])
    - 98            else:
    - 99                self.raise_error("Expected a character set translator after USING in TRANSLATE")
    -100
    -101            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
    + 92        STATEMENT_PARSERS = {
    + 93            **parser.Parser.STATEMENT_PARSERS,
    + 94            TokenType.REPLACE: lambda self: self._parse_create(),
    + 95        }
    + 96
    + 97        FUNCTION_PARSERS = {
    + 98            **parser.Parser.FUNCTION_PARSERS,
    + 99            "RANGE_N": lambda self: self._parse_rangen(),
    +100            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
    +101        }
     102
    -103        # FROM before SET in Teradata UPDATE syntax
    -104        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    -105        def _parse_update(self) -> exp.Expression:
    -106            return self.expression(
    -107                exp.Update,
    -108                **{  # type: ignore
    -109                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
    -110                    "from": self._parse_from(modifiers=True),
    -111                    "expressions": self._match(TokenType.SET)
    -112                    and self._parse_csv(self._parse_equality),
    -113                    "where": self._parse_where(),
    -114                },
    -115            )
    +103        def _parse_translate(self, strict: bool) -> exp.Expression:
    +104            this = self._parse_conjunction()
    +105
    +106            if not self._match(TokenType.USING):
    +107                self.raise_error("Expected USING in TRANSLATE")
    +108
    +109            if self._match_texts(self.CHARSET_TRANSLATORS):
    +110                charset_split = self._prev.text.split("_TO_")
    +111                to = self.expression(exp.CharacterSet, this=charset_split[1])
    +112            else:
    +113                self.raise_error("Expected a character set translator after USING in TRANSLATE")
    +114
    +115            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
     116
    -117        def _parse_rangen(self):
    -118            this = self._parse_id_var()
    -119            self._match(TokenType.BETWEEN)
    -120
    -121            expressions = self._parse_csv(self._parse_conjunction)
    -122            each = self._match_text_seq("EACH") and self._parse_conjunction()
    -123
    -124            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)
    -125
    -126        def _parse_cast(self, strict: bool) -> exp.Expression:
    -127            cast = t.cast(exp.Cast, super()._parse_cast(strict))
    -128            if cast.to.this == exp.DataType.Type.DATE and self._match(TokenType.FORMAT):
    -129                return format_time_lambda(exp.TimeToStr, "teradata")(
    -130                    [cast.this, self._parse_string()]
    -131                )
    -132            return cast
    -133
    -134    class Generator(generator.Generator):
    -135        JOIN_HINTS = False
    -136        TABLE_HINTS = False
    +117        # FROM before SET in Teradata UPDATE syntax
    +118        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    +119        def _parse_update(self) -> exp.Update:
    +120            return self.expression(
    +121                exp.Update,
    +122                **{  # type: ignore
    +123                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
    +124                    "from": self._parse_from(modifiers=True),
    +125                    "expressions": self._match(TokenType.SET)
    +126                    and self._parse_csv(self._parse_equality),
    +127                    "where": self._parse_where(),
    +128                },
    +129            )
    +130
    +131        def _parse_rangen(self):
    +132            this = self._parse_id_var()
    +133            self._match(TokenType.BETWEEN)
    +134
    +135            expressions = self._parse_csv(self._parse_conjunction)
    +136            each = self._match_text_seq("EACH") and self._parse_conjunction()
     137
    -138        TYPE_MAPPING = {
    -139            **generator.Generator.TYPE_MAPPING,
    -140            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
    -141        }
    -142
    -143        PROPERTIES_LOCATION = {
    -144            **generator.Generator.PROPERTIES_LOCATION,
    -145            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
    -146            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
    -147            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
    -148        }
    -149
    -150        TRANSFORMS = {
    -151            **generator.Generator.TRANSFORMS,
    -152            exp.Max: max_or_greatest,
    -153            exp.Min: min_or_least,
    -154            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -155            exp.TimeToStr: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
    -156            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -157        }
    -158
    -159        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    -160            return f"PARTITION BY {self.sql(expression, 'this')}"
    -161
    -162        # FROM before SET in Teradata UPDATE syntax
    -163        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    -164        def update_sql(self, expression: exp.Update) -> str:
    -165            this = self.sql(expression, "this")
    -166            from_sql = self.sql(expression, "from")
    -167            set_sql = self.expressions(expression, flat=True)
    -168            where_sql = self.sql(expression, "where")
    -169            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    -170            return self.prepend_ctes(expression, sql)
    -171
    -172        def mod_sql(self, expression: exp.Mod) -> str:
    -173            return self.binary(expression, "MOD")
    -174
    -175        def datatype_sql(self, expression: exp.DataType) -> str:
    -176            type_sql = super().datatype_sql(expression)
    -177            prefix_sql = expression.args.get("prefix")
    -178            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
    -179
    -180        def rangen_sql(self, expression: exp.RangeN) -> str:
    -181            this = self.sql(expression, "this")
    -182            expressions_sql = self.expressions(expression)
    -183            each_sql = self.sql(expression, "each")
    -184            each_sql = f" EACH {each_sql}" if each_sql else ""
    +138            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)
    +139
    +140    class Generator(generator.Generator):
    +141        JOIN_HINTS = False
    +142        TABLE_HINTS = False
    +143
    +144        TYPE_MAPPING = {
    +145            **generator.Generator.TYPE_MAPPING,
    +146            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
    +147        }
    +148
    +149        PROPERTIES_LOCATION = {
    +150            **generator.Generator.PROPERTIES_LOCATION,
    +151            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
    +152            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
    +153            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
    +154        }
    +155
    +156        TRANSFORMS = {
    +157            **generator.Generator.TRANSFORMS,
    +158            exp.Max: max_or_greatest,
    +159            exp.Min: min_or_least,
    +160            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +161            exp.StrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
    +162            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +163        }
    +164
    +165        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    +166            return f"PARTITION BY {self.sql(expression, 'this')}"
    +167
    +168        # FROM before SET in Teradata UPDATE syntax
    +169        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    +170        def update_sql(self, expression: exp.Update) -> str:
    +171            this = self.sql(expression, "this")
    +172            from_sql = self.sql(expression, "from")
    +173            set_sql = self.expressions(expression, flat=True)
    +174            where_sql = self.sql(expression, "where")
    +175            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    +176            return self.prepend_ctes(expression, sql)
    +177
    +178        def mod_sql(self, expression: exp.Mod) -> str:
    +179            return self.binary(expression, "MOD")
    +180
    +181        def datatype_sql(self, expression: exp.DataType) -> str:
    +182            type_sql = super().datatype_sql(expression)
    +183            prefix_sql = expression.args.get("prefix")
    +184            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
     185
    -186            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    +186        def rangen_sql(self, expression: exp.RangeN) -> str:
    +187            this = self.sql(expression, "this")
    +188            expressions_sql = self.expressions(expression)
    +189            each_sql = self.sql(expression, "each")
    +190            each_sql = f" EACH {each_sql}" if each_sql else ""
    +191
    +192            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    +193
    +194        def createable_sql(
    +195            self,
    +196            expression: exp.Create,
    +197            locations: dict[exp.Properties.Location, list[exp.Property]],
    +198        ) -> str:
    +199            kind = self.sql(expression, "kind").upper()
    +200            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
    +201                this_name = self.sql(expression.this, "this")
    +202                this_properties = self.properties(
    +203                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
    +204                    wrapped=False,
    +205                    prefix=",",
    +206                )
    +207                this_schema = self.schema_columns_sql(expression.this)
    +208                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
    +209            return super().createable_sql(expression, locations)
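
    The hunk above wires up the whole Teradata dialect (tokenizer, parser, generator). A minimal smoke test through
    sqlglot's public API, assuming only what the keyword table states (SEL for SELECT, MOD as the modulo keyword);
    the expected output is an assumption based on this hunk, not a captured result:

        import sqlglot

        # "SEL" tokenizes to SELECT and "MOD" to the modulo operator per the KEYWORDS above.
        print(sqlglot.transpile("SEL 5 MOD 2", read="teradata", write="duckdb")[0])
        # expected (assumption): SELECT 5 % 2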
     
    @@ -497,27 +553,27 @@
    -
    17    class Tokenizer(tokens.Tokenizer):
    -18        # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
    -19        KEYWORDS = {
    -20            **tokens.Tokenizer.KEYWORDS,
    -21            "BYTEINT": TokenType.SMALLINT,
    -22            "SEL": TokenType.SELECT,
    -23            "INS": TokenType.INSERT,
    -24            "MOD": TokenType.MOD,
    -25            "LT": TokenType.LT,
    -26            "LE": TokenType.LTE,
    -27            "GT": TokenType.GT,
    -28            "GE": TokenType.GTE,
    -29            "^=": TokenType.NEQ,
    -30            "NE": TokenType.NEQ,
    -31            "NOT=": TokenType.NEQ,
    -32            "ST_GEOMETRY": TokenType.GEOMETRY,
    -33        }
    -34
    -35        # teradata does not support % for modulus
    -36        SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
    -37        SINGLE_TOKENS.pop("%")
    +            
    31    class Tokenizer(tokens.Tokenizer):
    +32        # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
    +33        KEYWORDS = {
    +34            **tokens.Tokenizer.KEYWORDS,
    +35            "BYTEINT": TokenType.SMALLINT,
    +36            "SEL": TokenType.SELECT,
    +37            "INS": TokenType.INSERT,
    +38            "MOD": TokenType.MOD,
    +39            "LT": TokenType.LT,
    +40            "LE": TokenType.LTE,
    +41            "GT": TokenType.GT,
    +42            "GE": TokenType.GTE,
    +43            "^=": TokenType.NEQ,
    +44            "NE": TokenType.NEQ,
    +45            "NOT=": TokenType.NEQ,
    +46            "ST_GEOMETRY": TokenType.GEOMETRY,
    +47        }
    +48
    +49        # Teradata does not support % as a modulo operator
    +50        SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
    +51        SINGLE_TOKENS.pop("%")
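
    Since the tokenizer maps the Teradata comparison abbreviations (LT, LE, GT, GE, NE, ^=, NOT=) onto the standard
    comparison tokens, a query written with them should come back out with ordinary operators. A small, unverified sketch:

        import sqlglot

        sql = "SEL * FROM t WHERE a GE 5 AND b NE 3"
        print(sqlglot.transpile(sql, read="teradata", write="duckdb")[0])
        # expected (assumption): SELECT * FROM t WHERE a >= 5 AND b <> 3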
     
@@ -529,6 +585,7 @@
@@ -545,124 +602,108 @@
    -
     39    class Parser(parser.Parser):
    - 40        CHARSET_TRANSLATORS = {
    - 41            "GRAPHIC_TO_KANJISJIS",
    - 42            "GRAPHIC_TO_LATIN",
    - 43            "GRAPHIC_TO_UNICODE",
    - 44            "GRAPHIC_TO_UNICODE_PadSpace",
    - 45            "KANJI1_KanjiEBCDIC_TO_UNICODE",
    - 46            "KANJI1_KanjiEUC_TO_UNICODE",
    - 47            "KANJI1_KANJISJIS_TO_UNICODE",
    - 48            "KANJI1_SBC_TO_UNICODE",
    - 49            "KANJISJIS_TO_GRAPHIC",
    - 50            "KANJISJIS_TO_LATIN",
    - 51            "KANJISJIS_TO_UNICODE",
    - 52            "LATIN_TO_GRAPHIC",
    - 53            "LATIN_TO_KANJISJIS",
    - 54            "LATIN_TO_UNICODE",
    - 55            "LOCALE_TO_UNICODE",
    - 56            "UNICODE_TO_GRAPHIC",
    - 57            "UNICODE_TO_GRAPHIC_PadGraphic",
    - 58            "UNICODE_TO_GRAPHIC_VarGraphic",
    - 59            "UNICODE_TO_KANJI1_KanjiEBCDIC",
    - 60            "UNICODE_TO_KANJI1_KanjiEUC",
    - 61            "UNICODE_TO_KANJI1_KANJISJIS",
    - 62            "UNICODE_TO_KANJI1_SBC",
    - 63            "UNICODE_TO_KANJISJIS",
    - 64            "UNICODE_TO_LATIN",
    - 65            "UNICODE_TO_LOCALE",
    - 66            "UNICODE_TO_UNICODE_FoldSpace",
    - 67            "UNICODE_TO_UNICODE_Fullwidth",
    - 68            "UNICODE_TO_UNICODE_Halfwidth",
    - 69            "UNICODE_TO_UNICODE_NFC",
    - 70            "UNICODE_TO_UNICODE_NFD",
    - 71            "UNICODE_TO_UNICODE_NFKC",
    - 72            "UNICODE_TO_UNICODE_NFKD",
    - 73        }
    - 74
    - 75        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
    - 76        FUNC_TOKENS.remove(TokenType.REPLACE)
    - 77
    - 78        STATEMENT_PARSERS = {
    - 79            **parser.Parser.STATEMENT_PARSERS,
    - 80            TokenType.REPLACE: lambda self: self._parse_create(),
    - 81        }
    - 82
    - 83        FUNCTION_PARSERS = {
    - 84            **parser.Parser.FUNCTION_PARSERS,
    - 85            "RANGE_N": lambda self: self._parse_rangen(),
    - 86            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
    +            
     53    class Parser(parser.Parser):
    + 54        CHARSET_TRANSLATORS = {
    + 55            "GRAPHIC_TO_KANJISJIS",
    + 56            "GRAPHIC_TO_LATIN",
    + 57            "GRAPHIC_TO_UNICODE",
    + 58            "GRAPHIC_TO_UNICODE_PadSpace",
    + 59            "KANJI1_KanjiEBCDIC_TO_UNICODE",
    + 60            "KANJI1_KanjiEUC_TO_UNICODE",
    + 61            "KANJI1_KANJISJIS_TO_UNICODE",
    + 62            "KANJI1_SBC_TO_UNICODE",
    + 63            "KANJISJIS_TO_GRAPHIC",
    + 64            "KANJISJIS_TO_LATIN",
    + 65            "KANJISJIS_TO_UNICODE",
    + 66            "LATIN_TO_GRAPHIC",
    + 67            "LATIN_TO_KANJISJIS",
    + 68            "LATIN_TO_UNICODE",
    + 69            "LOCALE_TO_UNICODE",
    + 70            "UNICODE_TO_GRAPHIC",
    + 71            "UNICODE_TO_GRAPHIC_PadGraphic",
    + 72            "UNICODE_TO_GRAPHIC_VarGraphic",
    + 73            "UNICODE_TO_KANJI1_KanjiEBCDIC",
    + 74            "UNICODE_TO_KANJI1_KanjiEUC",
    + 75            "UNICODE_TO_KANJI1_KANJISJIS",
    + 76            "UNICODE_TO_KANJI1_SBC",
    + 77            "UNICODE_TO_KANJISJIS",
    + 78            "UNICODE_TO_LATIN",
    + 79            "UNICODE_TO_LOCALE",
    + 80            "UNICODE_TO_UNICODE_FoldSpace",
    + 81            "UNICODE_TO_UNICODE_Fullwidth",
    + 82            "UNICODE_TO_UNICODE_Halfwidth",
    + 83            "UNICODE_TO_UNICODE_NFC",
    + 84            "UNICODE_TO_UNICODE_NFD",
    + 85            "UNICODE_TO_UNICODE_NFKC",
    + 86            "UNICODE_TO_UNICODE_NFKD",
      87        }
      88
    - 89        def _parse_translate(self, strict: bool) -> exp.Expression:
    - 90            this = self._parse_conjunction()
    + 89        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
    + 90        FUNC_TOKENS.remove(TokenType.REPLACE)
      91
    - 92            if not self._match(TokenType.USING):
    - 93                self.raise_error("Expected USING in TRANSLATE")
    - 94
    - 95            if self._match_texts(self.CHARSET_TRANSLATORS):
    - 96                charset_split = self._prev.text.split("_TO_")
    - 97                to = self.expression(exp.CharacterSet, this=charset_split[1])
    - 98            else:
    - 99                self.raise_error("Expected a character set translator after USING in TRANSLATE")
    -100
    -101            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
    + 92        STATEMENT_PARSERS = {
    + 93            **parser.Parser.STATEMENT_PARSERS,
    + 94            TokenType.REPLACE: lambda self: self._parse_create(),
    + 95        }
    + 96
    + 97        FUNCTION_PARSERS = {
    + 98            **parser.Parser.FUNCTION_PARSERS,
    + 99            "RANGE_N": lambda self: self._parse_rangen(),
    +100            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
    +101        }
     102
    -103        # FROM before SET in Teradata UPDATE syntax
    -104        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    -105        def _parse_update(self) -> exp.Expression:
    -106            return self.expression(
    -107                exp.Update,
    -108                **{  # type: ignore
    -109                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
    -110                    "from": self._parse_from(modifiers=True),
    -111                    "expressions": self._match(TokenType.SET)
    -112                    and self._parse_csv(self._parse_equality),
    -113                    "where": self._parse_where(),
    -114                },
    -115            )
    +103        def _parse_translate(self, strict: bool) -> exp.Expression:
    +104            this = self._parse_conjunction()
    +105
    +106            if not self._match(TokenType.USING):
    +107                self.raise_error("Expected USING in TRANSLATE")
    +108
    +109            if self._match_texts(self.CHARSET_TRANSLATORS):
    +110                charset_split = self._prev.text.split("_TO_")
    +111                to = self.expression(exp.CharacterSet, this=charset_split[1])
    +112            else:
    +113                self.raise_error("Expected a character set translator after USING in TRANSLATE")
    +114
    +115            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
     116
    -117        def _parse_rangen(self):
    -118            this = self._parse_id_var()
    -119            self._match(TokenType.BETWEEN)
    -120
    -121            expressions = self._parse_csv(self._parse_conjunction)
    -122            each = self._match_text_seq("EACH") and self._parse_conjunction()
    -123
    -124            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)
    -125
    -126        def _parse_cast(self, strict: bool) -> exp.Expression:
    -127            cast = t.cast(exp.Cast, super()._parse_cast(strict))
    -128            if cast.to.this == exp.DataType.Type.DATE and self._match(TokenType.FORMAT):
    -129                return format_time_lambda(exp.TimeToStr, "teradata")(
    -130                    [cast.this, self._parse_string()]
    -131                )
    -132            return cast
    +117        # FROM before SET in Teradata UPDATE syntax
    +118        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    +119        def _parse_update(self) -> exp.Update:
    +120            return self.expression(
    +121                exp.Update,
    +122                **{  # type: ignore
    +123                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
    +124                    "from": self._parse_from(modifiers=True),
    +125                    "expressions": self._match(TokenType.SET)
    +126                    and self._parse_csv(self._parse_equality),
    +127                    "where": self._parse_where(),
    +128                },
    +129            )
    +130
    +131        def _parse_rangen(self):
    +132            this = self._parse_id_var()
    +133            self._match(TokenType.BETWEEN)
    +134
    +135            expressions = self._parse_csv(self._parse_conjunction)
    +136            each = self._match_text_seq("EACH") and self._parse_conjunction()
    +137
    +138            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)
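
    Per _parse_translate above, the charset translator named after USING is split on "_TO_" and the target half
    becomes an exp.CharacterSet attached to a Cast (or TryCast). A hedged way to inspect that without asserting the
    generated SQL:

        import sqlglot
        from sqlglot import exp

        ast = sqlglot.parse_one("SELECT TRANSLATE(col USING LATIN_TO_UNICODE)", read="teradata")
        # TryCast subclasses Cast, so find() locates the node either way.
        print(repr(ast.find(exp.Cast)))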
     
- Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

+ Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
- • error_level: the desired error level.
+ • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- • error_message_context: determines the amount of context to capture from a
+ • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters).
-   Default: 50.
- • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
- • alias_post_tablesample: If the table alias comes after tablesample. Default: False
+   Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- • null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
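
    The parser options listed above (error_level, error_message_context, max_errors) are plain keyword arguments, and
    sqlglot's top-level helpers forward extra keyword options to the dialect's Parser. A sketch under that assumption,
    also assuming the malformed statement below really is rejected:

        import sqlglot
        from sqlglot.errors import ErrorLevel, ParseError

        try:
            sqlglot.parse_one("SEL FROM WHERE", read="teradata", error_level=ErrorLevel.RAISE, max_errors=1)
        except ParseError as err:
            print(err)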
    @@ -695,105 +736,107 @@ Default: "nulls_are_small"
    -
    134    class Generator(generator.Generator):
    -135        JOIN_HINTS = False
    -136        TABLE_HINTS = False
    -137
    -138        TYPE_MAPPING = {
    -139            **generator.Generator.TYPE_MAPPING,
    -140            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
    -141        }
    -142
    -143        PROPERTIES_LOCATION = {
    -144            **generator.Generator.PROPERTIES_LOCATION,
    -145            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
    -146            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
    -147            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
    -148        }
    -149
    -150        TRANSFORMS = {
    -151            **generator.Generator.TRANSFORMS,
    -152            exp.Max: max_or_greatest,
    -153            exp.Min: min_or_least,
    -154            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    -155            exp.TimeToStr: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
    -156            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -157        }
    -158
    -159        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    -160            return f"PARTITION BY {self.sql(expression, 'this')}"
    -161
    -162        # FROM before SET in Teradata UPDATE syntax
    -163        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    -164        def update_sql(self, expression: exp.Update) -> str:
    -165            this = self.sql(expression, "this")
    -166            from_sql = self.sql(expression, "from")
    -167            set_sql = self.expressions(expression, flat=True)
    -168            where_sql = self.sql(expression, "where")
    -169            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    -170            return self.prepend_ctes(expression, sql)
    -171
    -172        def mod_sql(self, expression: exp.Mod) -> str:
    -173            return self.binary(expression, "MOD")
    -174
    -175        def datatype_sql(self, expression: exp.DataType) -> str:
    -176            type_sql = super().datatype_sql(expression)
    -177            prefix_sql = expression.args.get("prefix")
    -178            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
    -179
    -180        def rangen_sql(self, expression: exp.RangeN) -> str:
    -181            this = self.sql(expression, "this")
    -182            expressions_sql = self.expressions(expression)
    -183            each_sql = self.sql(expression, "each")
    -184            each_sql = f" EACH {each_sql}" if each_sql else ""
    +            
    140    class Generator(generator.Generator):
    +141        JOIN_HINTS = False
    +142        TABLE_HINTS = False
    +143
    +144        TYPE_MAPPING = {
    +145            **generator.Generator.TYPE_MAPPING,
    +146            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
    +147        }
    +148
    +149        PROPERTIES_LOCATION = {
    +150            **generator.Generator.PROPERTIES_LOCATION,
    +151            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
    +152            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
    +153            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
    +154        }
    +155
    +156        TRANSFORMS = {
    +157            **generator.Generator.TRANSFORMS,
    +158            exp.Max: max_or_greatest,
    +159            exp.Min: min_or_least,
    +160            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
    +161            exp.StrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
    +162            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +163        }
    +164
    +165        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    +166            return f"PARTITION BY {self.sql(expression, 'this')}"
    +167
    +168        # FROM before SET in Teradata UPDATE syntax
    +169        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    +170        def update_sql(self, expression: exp.Update) -> str:
    +171            this = self.sql(expression, "this")
    +172            from_sql = self.sql(expression, "from")
    +173            set_sql = self.expressions(expression, flat=True)
    +174            where_sql = self.sql(expression, "where")
    +175            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    +176            return self.prepend_ctes(expression, sql)
    +177
    +178        def mod_sql(self, expression: exp.Mod) -> str:
    +179            return self.binary(expression, "MOD")
    +180
    +181        def datatype_sql(self, expression: exp.DataType) -> str:
    +182            type_sql = super().datatype_sql(expression)
    +183            prefix_sql = expression.args.get("prefix")
    +184            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
     185
    -186            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    +186        def rangen_sql(self, expression: exp.RangeN) -> str:
    +187            this = self.sql(expression, "this")
    +188            expressions_sql = self.expressions(expression)
    +189            each_sql = self.sql(expression, "each")
    +190            each_sql = f" EACH {each_sql}" if each_sql else ""
    +191
    +192            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    +193
    +194        def createable_sql(
    +195            self,
    +196            expression: exp.Create,
    +197            locations: dict[exp.Properties.Location, list[exp.Property]],
    +198        ) -> str:
    +199            kind = self.sql(expression, "kind").upper()
    +200            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
    +201                this_name = self.sql(expression.this, "this")
    +202                this_properties = self.properties(
    +203                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
    +204                    wrapped=False,
    +205                    prefix=",",
    +206                )
    +207                this_schema = self.schema_columns_sql(expression.this)
    +208                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
    +209            return super().createable_sql(expression, locations)
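
    The exp.StrToDate transform above renders string-to-date conversions as Teradata's CAST ... AS DATE FORMAT form.
    As an unverified sketch, reading a MySQL STR_TO_DATE (which sqlglot parses into exp.StrToDate) and writing Teradata
    should exercise it:

        import sqlglot

        sql = "SELECT STR_TO_DATE('2023-06-16', '%Y-%m-%d')"
        print(sqlglot.transpile(sql, read="mysql", write="teradata")[0])
        # expected (assumption): SELECT CAST('2023-06-16' AS DATE FORMAT 'YYYY-MM-DD')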
     
- Generator interprets the given syntax tree and produces a SQL string as an output.

+ Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
- • time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
- • time_trie (trie): a trie of the time_mapping keys
- • pretty (bool): if set to True the returned string will be formatted. Default: False.
- • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
- • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
- • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
- • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
- • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
- • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
- • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
- • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
- • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
- • normalize (bool): if set to True all identifiers will lower cased
- • string_escape (str): specifies a string escape character. Default: '.
- • identifier_escape (str): specifies an identifier escape character. Default: ".
- • pad (int): determines padding in a formatted string. Default: 2.
- • indent (int): determines the size of indentation in a formatted string. Default: 4.
- • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
- • normalize_functions (str): normalize function names, "upper", "lower", or None. Default: "upper"
- • alias_post_tablesample (bool): if the table alias comes after tablesample. Default: False
- • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit. Default: False
- • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
+ • pretty: Whether or not to format the produced SQL string. Default: False.
+ • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
+ • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
+ • pad: Determines the pad size in a formatted string. Default: 2.
+ • indent: Determines the indentation size in a formatted string. Default: 2.
+ • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
+ • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
+ • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- • leading_comma (bool): if the the comma is leading or trailing in select statements
+ • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
@@ -816,8 +859,8 @@ Default: True
    -
    159        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    -160            return f"PARTITION BY {self.sql(expression, 'this')}"
    +            
    165        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    +166            return f"PARTITION BY {self.sql(expression, 'this')}"
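
    partitionedbyproperty_sql drops the trailing "ED" that the base generator emits (PARTITIONED BY). A direct,
    hypothetical check against the generator class; ordinary code would go through transpile instead, and the printed
    result is stated as an expectation only:

        from sqlglot import exp
        from sqlglot.dialects.teradata import Teradata

        prop = exp.PartitionedByProperty(this=exp.column("x"))
        print(Teradata.Generator().sql(prop))
        # expected (assumption): PARTITION BY x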
     
    @@ -835,13 +878,13 @@ Default: True
    -
    164        def update_sql(self, expression: exp.Update) -> str:
    -165            this = self.sql(expression, "this")
    -166            from_sql = self.sql(expression, "from")
    -167            set_sql = self.expressions(expression, flat=True)
    -168            where_sql = self.sql(expression, "where")
    -169            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    -170            return self.prepend_ctes(expression, sql)
    +            
    170        def update_sql(self, expression: exp.Update) -> str:
    +171            this = self.sql(expression, "this")
    +172            from_sql = self.sql(expression, "from")
    +173            set_sql = self.expressions(expression, flat=True)
    +174            where_sql = self.sql(expression, "where")
    +175            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    +176            return self.prepend_ctes(expression, sql)
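
    Teradata puts the FROM clause before SET in UPDATE, which is what _parse_update and update_sql above handle.
    A sketch of moving such a statement to a dialect with the usual clause order; the output line is an expectation,
    not a verified result:

        import sqlglot

        td = "UPDATE t FROM s SET x = s.x WHERE t.id = s.id"
        print(sqlglot.transpile(td, read="teradata", write="postgres")[0])
        # expected (assumption): UPDATE t SET x = s.x FROM s WHERE t.id = s.id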
     
    @@ -859,8 +902,8 @@ Default: True
    -
    172        def mod_sql(self, expression: exp.Mod) -> str:
    -173            return self.binary(expression, "MOD")
    +            
    178        def mod_sql(self, expression: exp.Mod) -> str:
    +179            return self.binary(expression, "MOD")
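
    Because % is not a Teradata operator (it was dropped from SINGLE_TOKENS earlier in this diff), mod_sql spells the
    operation out with the MOD keyword when generating Teradata. A hedged sketch:

        import sqlglot

        print(sqlglot.transpile("SELECT 10 % 3", read="duckdb", write="teradata")[0])
        # expected (assumption): SELECT 10 MOD 3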
     
    @@ -878,10 +921,10 @@ Default: True
    -
    175        def datatype_sql(self, expression: exp.DataType) -> str:
    -176            type_sql = super().datatype_sql(expression)
    -177            prefix_sql = expression.args.get("prefix")
    -178            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
    +            
    181        def datatype_sql(self, expression: exp.DataType) -> str:
    +182            type_sql = super().datatype_sql(expression)
    +183            prefix_sql = expression.args.get("prefix")
    +184            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
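
    ST_GEOMETRY is tokenized to the generic GEOMETRY type and mapped back by TYPE_MAPPING, so a Teradata-to-Teradata
    round trip of a geometry column should preserve the spelling. A hedged sketch:

        import sqlglot

        ddl = "CREATE TABLE geo (shape ST_GEOMETRY)"
        print(sqlglot.transpile(ddl, read="teradata", write="teradata")[0])
        # expected (assumption): CREATE TABLE geo (shape ST_GEOMETRY)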
     
    @@ -899,13 +942,46 @@ Default: True
    -
    180        def rangen_sql(self, expression: exp.RangeN) -> str:
    -181            this = self.sql(expression, "this")
    -182            expressions_sql = self.expressions(expression)
    -183            each_sql = self.sql(expression, "each")
    -184            each_sql = f" EACH {each_sql}" if each_sql else ""
    -185
    -186            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    +            
    186        def rangen_sql(self, expression: exp.RangeN) -> str:
    +187            this = self.sql(expression, "this")
    +188            expressions_sql = self.expressions(expression)
    +189            each_sql = self.sql(expression, "each")
    +190            each_sql = f" EACH {each_sql}" if each_sql else ""
    +191
    +192            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    +
+ def createable_sql(self, expression: sqlglot.expressions.Create, locations: dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]) -> str:
+
    194        def createable_sql(
    +195            self,
    +196            expression: exp.Create,
    +197            locations: dict[exp.Properties.Location, list[exp.Property]],
    +198        ) -> str:
    +199            kind = self.sql(expression, "kind").upper()
    +200            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
    +201                this_name = self.sql(expression.this, "this")
    +202                this_properties = self.properties(
    +203                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
    +204                    wrapped=False,
    +205                    prefix=",",
    +206                )
    +207                this_schema = self.schema_columns_sql(expression.this)
    +208                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
    +209            return super().createable_sql(expression, locations)
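
    RANGE_N is parsed by _parse_rangen into an exp.RangeN (identifier, BETWEEN ranges, optional EACH step) and printed
    back by rangen_sql, so it should round-trip. A sketch, with the output stated as an expectation only:

        import sqlglot

        sql = "SELECT RANGE_N(x BETWEEN 1 AND 10 EACH 2)"
        print(sqlglot.transpile(sql, read="teradata", write="teradata")[0])
        # expected (assumption): SELECT RANGE_N(x BETWEEN 1 AND 10 EACH 2)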
     
    @@ -1021,10 +1097,12 @@ Default: True
    ordered_sql
    matchrecognize_sql
    query_modifiers
    +
    offset_limit_modifiers
    after_having_modifiers
    after_limit_modifiers
    select_sql
    schema_sql
    +
    schema_columns_sql
    star_sql
    parameter_sql
    sessionparameter_sql
    @@ -1049,7 +1127,7 @@ Default: True
    nextvaluefor_sql
    extract_sql
    trim_sql
    -
    concat_sql
    +
    safeconcat_sql
    check_sql
    foreignkey_sql
    primarykey_sql
    @@ -1100,6 +1178,7 @@ Default: True
    respectnulls_sql
    intdiv_sql
    dpipe_sql
    +
    safedpipe_sql
    div_sql
    overlaps_sql
    distance_sql
    @@ -1147,6 +1226,7 @@ Default: True
    dictproperty_sql
    dictrange_sql
    dictsubproperty_sql
    +
    oncluster_sql
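
    The Generator options documented above (pretty, identify, normalize_functions, and so on) are regular keyword
    arguments of the top-level API, so they can be combined with any write dialect. A small usage sketch; the exact
    formatting of the output is not asserted here:

        import sqlglot

        sql = "SELECT a, b FROM t WHERE a > 1"
        print(sqlglot.transpile(sql, write="teradata", identify=True, pretty=True)[0])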
diff --git a/docs/sqlglot/dialects/trino.html b/docs/sqlglot/dialects/trino.html
index 15f0491..ac73d6a 100644
--- a/docs/sqlglot/dialects/trino.html
+++ b/docs/sqlglot/dialects/trino.html
@@ -156,49 +156,34 @@
- Generator interprets the given syntax tree and produces a SQL string as an output.

+ Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
- • time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
- • time_trie (trie): a trie of the time_mapping keys
- • pretty (bool): if set to True the returned string will be formatted. Default: False.
- • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
- • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
- • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
- • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
- • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
- • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
- • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
- • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
- • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
- • normalize (bool): if set to True all identifiers will lower cased
- • string_escape (str): specifies a string escape character. Default: '.
- • identifier_escape (str): specifies an identifier escape character. Default: ".
- • pad (int): determines padding in a formatted string. Default: 2.
- • indent (int): determines the size of indentation in a formatted string. Default: 4.
- • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
- • normalize_functions (str): normalize function names, "upper", "lower", or None. Default: "upper"
- • alias_post_tablesample (bool): if the table alias comes after tablesample. Default: False
- • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit. Default: False
- • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
+ • pretty: Whether or not to format the produced SQL string. Default: False.
+ • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
+ • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
+ • pad: Determines the pad size in a formatted string. Default: 2.
+ • indent: Determines the indentation size in a formatted string. Default: 2.
+ • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
+ • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
+ • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- • leading_comma (bool): if the the comma is leading or trailing in select statements
+ • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
@@ -239,6 +224,7 @@ Default: True
    • notnullcolumnconstraint_sql
      primarykeycolumnconstraint_sql
      uniquecolumnconstraint_sql
      +
      createable_sql
      create_sql
      clone_sql
      describe_sql
      @@ -325,6 +311,7 @@ Default: True
      after_limit_modifiers
      select_sql
      schema_sql
      +
      schema_columns_sql
      star_sql
      parameter_sql
      sessionparameter_sql
      @@ -349,7 +336,7 @@ Default: True
      nextvaluefor_sql
      extract_sql
      trim_sql
      -
      concat_sql
      +
      safeconcat_sql
      check_sql
      foreignkey_sql
      primarykey_sql
      @@ -398,6 +385,7 @@ Default: True
      respectnulls_sql
      intdiv_sql
      dpipe_sql
      +
      safedpipe_sql
      div_sql
      overlaps_sql
      distance_sql
      @@ -446,12 +434,14 @@ Default: True
      dictproperty_sql
      dictrange_sql
      dictsubproperty_sql
      +
      oncluster_sql
    @@ -481,6 +471,7 @@ Default: True diff --git a/docs/sqlglot/dialects/tsql.html b/docs/sqlglot/dialects/tsql.html index 88de4be..ff7988f 100644 --- a/docs/sqlglot/dialects/tsql.html +++ b/docs/sqlglot/dialects/tsql.html @@ -155,9 +155,9 @@ 64 format=exp.Literal.string( 65 format_time( 66 args[0].name, - 67 {**TSQL.time_mapping, **FULL_FORMAT_TIME_MAPPING} + 67 {**TSQL.TIME_MAPPING, **FULL_FORMAT_TIME_MAPPING} 68 if full_format_mapping - 69 else TSQL.time_mapping, + 69 else TSQL.TIME_MAPPING, 70 ) 71 ), 72 ) @@ -177,9 +177,9 @@ 86 return exp.TimeToStr( 87 this=args[0], 88 format=exp.Literal.string( - 89 format_time(fmt.name, TSQL.format_time_mapping) + 89 format_time(fmt.name, TSQL.FORMAT_TIME_MAPPING) 90 if len(fmt.name) == 1 - 91 else format_time(fmt.name, TSQL.time_mapping) + 91 else format_time(fmt.name, TSQL.TIME_MAPPING) 92 ), 93 ) 94 @@ -229,7 +229,7 @@ 138 if isinstance(expression, exp.NumberToStr) 139 else exp.Literal.string( 140 format_time( -141 expression.text("format"), t.cast(t.Dict[str, str], TSQL.inverse_time_mapping) +141 expression.text("format"), t.cast(t.Dict[str, str], TSQL.INVERSE_TIME_MAPPING) 142 ) 143 ) 144 ) @@ -257,10 +257,10 @@ 166 167 168class TSQL(Dialect): -169 null_ordering = "nulls_are_small" -170 time_format = "'yyyy-mm-dd hh:mm:ss'" +169 NULL_ORDERING = "nulls_are_small" +170 TIME_FORMAT = "'yyyy-mm-dd hh:mm:ss'" 171 -172 time_mapping = { +172 TIME_MAPPING = { 173 "year": "%Y", 174 "qq": "%q", 175 "q": "%q", @@ -304,7 +304,7 @@ 213 "yy": "%y", 214 } 215 -216 convert_format_mapping = { +216 CONVERT_FORMAT_MAPPING = { 217 "0": "%b %d %Y %-I:%M%p", 218 "1": "%m/%d/%y", 219 "2": "%y.%m.%d", @@ -344,8 +344,8 @@ 253 "120": "%Y-%m-%d %H:%M:%S", 254 "121": "%Y-%m-%d %H:%M:%S.%f", 255 } -256 # not sure if complete -257 format_time_mapping = { +256 +257 FORMAT_TIME_MAPPING = { 258 "y": "%B %Y", 259 "d": "%m/%d/%Y", 260 "H": "%-H", @@ -403,56 +403,56 @@ 312 FUNCTIONS = { 313 **parser.Parser.FUNCTIONS, 314 "CHARINDEX": lambda args: exp.StrPosition( -315 this=seq_get(args, 1), -316 substr=seq_get(args, 0), -317 position=seq_get(args, 2), -318 ), -319 "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL), -320 "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL), -321 "DATENAME": _format_time_lambda(exp.TimeToStr, full_format_mapping=True), -322 "DATEPART": _format_time_lambda(exp.TimeToStr), -323 "EOMONTH": _parse_eomonth, -324 "FORMAT": _parse_format, -325 "GETDATE": exp.CurrentTimestamp.from_arg_list, -326 "HASHBYTES": _parse_hashbytes, -327 "IIF": exp.If.from_arg_list, -328 "ISNULL": exp.Coalesce.from_arg_list, -329 "JSON_VALUE": exp.JSONExtractScalar.from_arg_list, -330 "LEN": exp.Length.from_arg_list, -331 "REPLICATE": exp.Repeat.from_arg_list, -332 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), -333 "SYSDATETIME": exp.CurrentTimestamp.from_arg_list, -334 "SUSER_NAME": exp.CurrentUser.from_arg_list, -335 "SUSER_SNAME": exp.CurrentUser.from_arg_list, -336 "SYSTEM_USER": exp.CurrentUser.from_arg_list, -337 } -338 -339 JOIN_HINTS = { -340 "LOOP", -341 "HASH", -342 "MERGE", -343 "REMOTE", -344 } -345 -346 VAR_LENGTH_DATATYPES = { -347 DataType.Type.NVARCHAR, -348 DataType.Type.VARCHAR, -349 DataType.Type.CHAR, -350 DataType.Type.NCHAR, -351 } -352 -353 RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - { -354 TokenType.TABLE, -355 *parser.Parser.TYPE_TOKENS, -356 } -357 -358 STATEMENT_PARSERS = { -359 **parser.Parser.STATEMENT_PARSERS, -360 TokenType.END: lambda self: 
self._parse_command(), -361 } -362 -363 LOG_BASE_FIRST = False -364 LOG_DEFAULTS_TO_LN = True +315 this=seq_get(args, 1), substr=seq_get(args, 0), position=seq_get(args, 2) +316 ), +317 "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL), +318 "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL), +319 "DATENAME": _format_time_lambda(exp.TimeToStr, full_format_mapping=True), +320 "DATEPART": _format_time_lambda(exp.TimeToStr), +321 "EOMONTH": _parse_eomonth, +322 "FORMAT": _parse_format, +323 "GETDATE": exp.CurrentTimestamp.from_arg_list, +324 "HASHBYTES": _parse_hashbytes, +325 "IIF": exp.If.from_arg_list, +326 "ISNULL": exp.Coalesce.from_arg_list, +327 "JSON_VALUE": exp.JSONExtractScalar.from_arg_list, +328 "LEN": exp.Length.from_arg_list, +329 "REPLICATE": exp.Repeat.from_arg_list, +330 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), +331 "SYSDATETIME": exp.CurrentTimestamp.from_arg_list, +332 "SUSER_NAME": exp.CurrentUser.from_arg_list, +333 "SUSER_SNAME": exp.CurrentUser.from_arg_list, +334 "SYSTEM_USER": exp.CurrentUser.from_arg_list, +335 } +336 +337 JOIN_HINTS = { +338 "LOOP", +339 "HASH", +340 "MERGE", +341 "REMOTE", +342 } +343 +344 VAR_LENGTH_DATATYPES = { +345 DataType.Type.NVARCHAR, +346 DataType.Type.VARCHAR, +347 DataType.Type.CHAR, +348 DataType.Type.NCHAR, +349 } +350 +351 RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - { +352 TokenType.TABLE, +353 *parser.Parser.TYPE_TOKENS, +354 } +355 +356 STATEMENT_PARSERS = { +357 **parser.Parser.STATEMENT_PARSERS, +358 TokenType.END: lambda self: self._parse_command(), +359 } +360 +361 LOG_BASE_FIRST = False +362 LOG_DEFAULTS_TO_LN = True +363 +364 CONCAT_NULL_OUTPUTS_STRING = True 365 366 def _parse_system_time(self) -> t.Optional[exp.Expression]: 367 if not self._match_text_seq("FOR", "SYSTEM_TIME"): @@ -491,7 +491,7 @@ 400 table.set("system_time", self._parse_system_time()) 401 return table 402 -403 def _parse_returns(self) -> exp.Expression: +403 def _parse_returns(self) -> exp.ReturnsProperty: 404 table = self._parse_id_var(any_token=False, tokens=self.RETURNS_TABLE_TOKENS) 405 returns = super()._parse_returns() 406 returns.set("table", table) @@ -514,12 +514,12 @@ 423 format_val = self._parse_number() 424 format_val_name = format_val.name if format_val else "" 425 -426 if format_val_name not in TSQL.convert_format_mapping: +426 if format_val_name not in TSQL.CONVERT_FORMAT_MAPPING: 427 raise ValueError( 428 f"CONVERT function at T-SQL does not support format style {format_val_name}" 429 ) 430 -431 format_norm = exp.Literal.string(TSQL.convert_format_mapping[format_val_name]) +431 format_norm = exp.Literal.string(TSQL.CONVERT_FORMAT_MAPPING[format_val_name]) 432 433 # Check whether the convert entails a string to date format 434 if to.this == DataType.Type.DATE: @@ -657,10 +657,10 @@
    169class TSQL(Dialect):
    -170    null_ordering = "nulls_are_small"
    -171    time_format = "'yyyy-mm-dd hh:mm:ss'"
    +170    NULL_ORDERING = "nulls_are_small"
    +171    TIME_FORMAT = "'yyyy-mm-dd hh:mm:ss'"
     172
    -173    time_mapping = {
    +173    TIME_MAPPING = {
     174        "year": "%Y",
     175        "qq": "%q",
     176        "q": "%q",
    @@ -704,7 +704,7 @@
     214        "yy": "%y",
     215    }
     216
    -217    convert_format_mapping = {
    +217    CONVERT_FORMAT_MAPPING = {
     218        "0": "%b %d %Y %-I:%M%p",
     219        "1": "%m/%d/%y",
     220        "2": "%y.%m.%d",
    @@ -744,8 +744,8 @@
     254        "120": "%Y-%m-%d %H:%M:%S",
     255        "121": "%Y-%m-%d %H:%M:%S.%f",
     256    }
    -257    # not sure if complete
    -258    format_time_mapping = {
    +257
    +258    FORMAT_TIME_MAPPING = {
     259        "y": "%B %Y",
     260        "d": "%m/%d/%Y",
     261        "H": "%-H",
    @@ -803,56 +803,56 @@
     313        FUNCTIONS = {
     314            **parser.Parser.FUNCTIONS,
     315            "CHARINDEX": lambda args: exp.StrPosition(
    -316                this=seq_get(args, 1),
    -317                substr=seq_get(args, 0),
    -318                position=seq_get(args, 2),
    -319            ),
    -320            "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL),
    -321            "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL),
    -322            "DATENAME": _format_time_lambda(exp.TimeToStr, full_format_mapping=True),
    -323            "DATEPART": _format_time_lambda(exp.TimeToStr),
    -324            "EOMONTH": _parse_eomonth,
    -325            "FORMAT": _parse_format,
    -326            "GETDATE": exp.CurrentTimestamp.from_arg_list,
    -327            "HASHBYTES": _parse_hashbytes,
    -328            "IIF": exp.If.from_arg_list,
    -329            "ISNULL": exp.Coalesce.from_arg_list,
    -330            "JSON_VALUE": exp.JSONExtractScalar.from_arg_list,
    -331            "LEN": exp.Length.from_arg_list,
    -332            "REPLICATE": exp.Repeat.from_arg_list,
    -333            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    -334            "SYSDATETIME": exp.CurrentTimestamp.from_arg_list,
    -335            "SUSER_NAME": exp.CurrentUser.from_arg_list,
    -336            "SUSER_SNAME": exp.CurrentUser.from_arg_list,
    -337            "SYSTEM_USER": exp.CurrentUser.from_arg_list,
    -338        }
    -339
    -340        JOIN_HINTS = {
    -341            "LOOP",
    -342            "HASH",
    -343            "MERGE",
    -344            "REMOTE",
    -345        }
    -346
    -347        VAR_LENGTH_DATATYPES = {
    -348            DataType.Type.NVARCHAR,
    -349            DataType.Type.VARCHAR,
    -350            DataType.Type.CHAR,
    -351            DataType.Type.NCHAR,
    -352        }
    -353
    -354        RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - {
    -355            TokenType.TABLE,
    -356            *parser.Parser.TYPE_TOKENS,
    -357        }
    -358
    -359        STATEMENT_PARSERS = {
    -360            **parser.Parser.STATEMENT_PARSERS,
    -361            TokenType.END: lambda self: self._parse_command(),
    -362        }
    -363
    -364        LOG_BASE_FIRST = False
    -365        LOG_DEFAULTS_TO_LN = True
    +316                this=seq_get(args, 1), substr=seq_get(args, 0), position=seq_get(args, 2)
    +317            ),
    +318            "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL),
    +319            "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL),
    +320            "DATENAME": _format_time_lambda(exp.TimeToStr, full_format_mapping=True),
    +321            "DATEPART": _format_time_lambda(exp.TimeToStr),
    +322            "EOMONTH": _parse_eomonth,
    +323            "FORMAT": _parse_format,
    +324            "GETDATE": exp.CurrentTimestamp.from_arg_list,
    +325            "HASHBYTES": _parse_hashbytes,
    +326            "IIF": exp.If.from_arg_list,
    +327            "ISNULL": exp.Coalesce.from_arg_list,
    +328            "JSON_VALUE": exp.JSONExtractScalar.from_arg_list,
    +329            "LEN": exp.Length.from_arg_list,
    +330            "REPLICATE": exp.Repeat.from_arg_list,
    +331            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    +332            "SYSDATETIME": exp.CurrentTimestamp.from_arg_list,
    +333            "SUSER_NAME": exp.CurrentUser.from_arg_list,
    +334            "SUSER_SNAME": exp.CurrentUser.from_arg_list,
    +335            "SYSTEM_USER": exp.CurrentUser.from_arg_list,
    +336        }
    +337
    +338        JOIN_HINTS = {
    +339            "LOOP",
    +340            "HASH",
    +341            "MERGE",
    +342            "REMOTE",
    +343        }
    +344
    +345        VAR_LENGTH_DATATYPES = {
    +346            DataType.Type.NVARCHAR,
    +347            DataType.Type.VARCHAR,
    +348            DataType.Type.CHAR,
    +349            DataType.Type.NCHAR,
    +350        }
    +351
    +352        RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - {
    +353            TokenType.TABLE,
    +354            *parser.Parser.TYPE_TOKENS,
    +355        }
    +356
    +357        STATEMENT_PARSERS = {
    +358            **parser.Parser.STATEMENT_PARSERS,
    +359            TokenType.END: lambda self: self._parse_command(),
    +360        }
    +361
    +362        LOG_BASE_FIRST = False
    +363        LOG_DEFAULTS_TO_LN = True
    +364
    +365        CONCAT_NULL_OUTPUTS_STRING = True
     366
     367        def _parse_system_time(self) -> t.Optional[exp.Expression]:
     368            if not self._match_text_seq("FOR", "SYSTEM_TIME"):
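The FUNCTIONS table above is how the T-SQL parser folds vendor-specific functions into sqlglot's canonical expressions (ISNULL becomes Coalesce, CHARINDEX becomes StrPosition with swapped arguments, and so on). A minimal sketch of the effect; it relies only on the mappings shown in this hunk, and the transpiled output text may vary slightly between sqlglot versions:

    import sqlglot
    from sqlglot import exp

    # ISNULL(a, b) parses into the canonical Coalesce node, so any target
    # dialect can render it with its own spelling.
    tree = sqlglot.parse_one("SELECT ISNULL(a, b) FROM t", read="tsql")
    assert tree.find(exp.Coalesce) is not None

    # CHARINDEX(needle, haystack) swaps its arguments into
    # StrPosition(this=haystack, substr=needle), per the lambda above.
    pos = sqlglot.parse_one("SELECT CHARINDEX('x', col) FROM t", read="tsql")
    assert pos.find(exp.StrPosition) is not None

    # Round-tripping through another dialect shows the normalization at work.
    print(sqlglot.transpile("SELECT ISNULL(a, b) FROM t", read="tsql", write="duckdb")[0])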
    @@ -891,7 +891,7 @@
     401            table.set("system_time", self._parse_system_time())
     402            return table
     403
    -404        def _parse_returns(self) -> exp.Expression:
    +404        def _parse_returns(self) -> exp.ReturnsProperty:
     405            table = self._parse_id_var(any_token=False, tokens=self.RETURNS_TABLE_TOKENS)
     406            returns = super()._parse_returns()
     407            returns.set("table", table)
    @@ -914,12 +914,12 @@
     424                format_val = self._parse_number()
     425                format_val_name = format_val.name if format_val else ""
     426
    -427                if format_val_name not in TSQL.convert_format_mapping:
    +427                if format_val_name not in TSQL.CONVERT_FORMAT_MAPPING:
     428                    raise ValueError(
     429                        f"CONVERT function at T-SQL does not support format style {format_val_name}"
     430                    )
     431
    -432                format_norm = exp.Literal.string(TSQL.convert_format_mapping[format_val_name])
    +432                format_norm = exp.Literal.string(TSQL.CONVERT_FORMAT_MAPPING[format_val_name])
     433
     434                # Check whether the convert entails a string to date format
     435                if to.this == DataType.Type.DATE:
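The CONVERT_FORMAT_MAPPING styles referenced in this hunk are T-SQL's numeric CONVERT style codes; the parser rewrites a known style into the explicit format string from the table and rejects unknown styles via the ValueError shown above. A hedged sketch (target dialect and printed output are illustrative only):

    import sqlglot

    # Style 120 corresponds to '%Y-%m-%d %H:%M:%S' in CONVERT_FORMAT_MAPPING,
    # so the CONVERT call is rewritten into an explicit date-format expression.
    print(
        sqlglot.transpile(
            "SELECT CONVERT(VARCHAR(10), x, 120) FROM t",
            read="tsql",
            write="spark",
        )[0]
    )

    # A style code that is missing from the mapping trips the guard above.
    try:
        sqlglot.transpile("SELECT CONVERT(VARCHAR(10), x, 999) FROM t", read="tsql")
    except ValueError as error:
        print(error)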
    @@ -1096,6 +1096,7 @@
                                         
                                     
    @@ -1116,56 +1117,56 @@
     313        FUNCTIONS = {
     314            **parser.Parser.FUNCTIONS,
     315            "CHARINDEX": lambda args: exp.StrPosition(
    -316                this=seq_get(args, 1),
    -317                substr=seq_get(args, 0),
    -318                position=seq_get(args, 2),
    -319            ),
    -320            "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL),
    -321            "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL),
    -322            "DATENAME": _format_time_lambda(exp.TimeToStr, full_format_mapping=True),
    -323            "DATEPART": _format_time_lambda(exp.TimeToStr),
    -324            "EOMONTH": _parse_eomonth,
    -325            "FORMAT": _parse_format,
    -326            "GETDATE": exp.CurrentTimestamp.from_arg_list,
    -327            "HASHBYTES": _parse_hashbytes,
    -328            "IIF": exp.If.from_arg_list,
    -329            "ISNULL": exp.Coalesce.from_arg_list,
    -330            "JSON_VALUE": exp.JSONExtractScalar.from_arg_list,
    -331            "LEN": exp.Length.from_arg_list,
    -332            "REPLICATE": exp.Repeat.from_arg_list,
    -333            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    -334            "SYSDATETIME": exp.CurrentTimestamp.from_arg_list,
    -335            "SUSER_NAME": exp.CurrentUser.from_arg_list,
    -336            "SUSER_SNAME": exp.CurrentUser.from_arg_list,
    -337            "SYSTEM_USER": exp.CurrentUser.from_arg_list,
    -338        }
    -339
    -340        JOIN_HINTS = {
    -341            "LOOP",
    -342            "HASH",
    -343            "MERGE",
    -344            "REMOTE",
    -345        }
    -346
    -347        VAR_LENGTH_DATATYPES = {
    -348            DataType.Type.NVARCHAR,
    -349            DataType.Type.VARCHAR,
    -350            DataType.Type.CHAR,
    -351            DataType.Type.NCHAR,
    -352        }
    -353
    -354        RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - {
    -355            TokenType.TABLE,
    -356            *parser.Parser.TYPE_TOKENS,
    -357        }
    -358
    -359        STATEMENT_PARSERS = {
    -360            **parser.Parser.STATEMENT_PARSERS,
    -361            TokenType.END: lambda self: self._parse_command(),
    -362        }
    -363
    -364        LOG_BASE_FIRST = False
    -365        LOG_DEFAULTS_TO_LN = True
    +316                this=seq_get(args, 1), substr=seq_get(args, 0), position=seq_get(args, 2)
    +317            ),
    +318            "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL),
    +319            "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL),
    +320            "DATENAME": _format_time_lambda(exp.TimeToStr, full_format_mapping=True),
    +321            "DATEPART": _format_time_lambda(exp.TimeToStr),
    +322            "EOMONTH": _parse_eomonth,
    +323            "FORMAT": _parse_format,
    +324            "GETDATE": exp.CurrentTimestamp.from_arg_list,
    +325            "HASHBYTES": _parse_hashbytes,
    +326            "IIF": exp.If.from_arg_list,
    +327            "ISNULL": exp.Coalesce.from_arg_list,
    +328            "JSON_VALUE": exp.JSONExtractScalar.from_arg_list,
    +329            "LEN": exp.Length.from_arg_list,
    +330            "REPLICATE": exp.Repeat.from_arg_list,
    +331            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    +332            "SYSDATETIME": exp.CurrentTimestamp.from_arg_list,
    +333            "SUSER_NAME": exp.CurrentUser.from_arg_list,
    +334            "SUSER_SNAME": exp.CurrentUser.from_arg_list,
    +335            "SYSTEM_USER": exp.CurrentUser.from_arg_list,
    +336        }
    +337
    +338        JOIN_HINTS = {
    +339            "LOOP",
    +340            "HASH",
    +341            "MERGE",
    +342            "REMOTE",
    +343        }
    +344
    +345        VAR_LENGTH_DATATYPES = {
    +346            DataType.Type.NVARCHAR,
    +347            DataType.Type.VARCHAR,
    +348            DataType.Type.CHAR,
    +349            DataType.Type.NCHAR,
    +350        }
    +351
    +352        RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - {
    +353            TokenType.TABLE,
    +354            *parser.Parser.TYPE_TOKENS,
    +355        }
    +356
    +357        STATEMENT_PARSERS = {
    +358            **parser.Parser.STATEMENT_PARSERS,
    +359            TokenType.END: lambda self: self._parse_command(),
    +360        }
    +361
    +362        LOG_BASE_FIRST = False
    +363        LOG_DEFAULTS_TO_LN = True
    +364
    +365        CONCAT_NULL_OUTPUTS_STRING = True
     366
     367        def _parse_system_time(self) -> t.Optional[exp.Expression]:
     368            if not self._match_text_seq("FOR", "SYSTEM_TIME"):
    @@ -1204,7 +1205,7 @@
     401            table.set("system_time", self._parse_system_time())
     402            return table
     403
    -404        def _parse_returns(self) -> exp.Expression:
    +404        def _parse_returns(self) -> exp.ReturnsProperty:
     405            table = self._parse_id_var(any_token=False, tokens=self.RETURNS_TABLE_TOKENS)
     406            returns = super()._parse_returns()
     407            returns.set("table", table)
    @@ -1227,12 +1228,12 @@
     424                format_val = self._parse_number()
     425                format_val_name = format_val.name if format_val else ""
     426
    -427                if format_val_name not in TSQL.convert_format_mapping:
    +427                if format_val_name not in TSQL.CONVERT_FORMAT_MAPPING:
     428                    raise ValueError(
     429                        f"CONVERT function at T-SQL does not support format style {format_val_name}"
     430                    )
     431
    -432                format_norm = exp.Literal.string(TSQL.convert_format_mapping[format_val_name])
    +432                format_norm = exp.Literal.string(TSQL.CONVERT_FORMAT_MAPPING[format_val_name])
     433
     434                # Check whether the convert entails a string to date format
     435                if to.this == DataType.Type.DATE:
    @@ -1270,27 +1271,19 @@
     
-Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces
-a parsed syntax tree.
+Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

 Arguments:
-    error_level: the desired error level. Default: ErrorLevel.IMMEDIATE
+    error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
-    error_message_context: determines the amount of context to capture from a
-        query string when displaying the error message (in number of characters). Default: 50.
-    index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
-    alias_post_tablesample: If the table alias comes after tablesample. Default: False
+    error_message_context: Determines the amount of context to capture from a
+        query string when displaying the error message (in number of characters). Default: 100
     max_errors: Maximum number of error messages to include in a raised ParseError.
         This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
-    null_ordering: Indicates the default null ordering method to use if not explicitly set.
-        Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
-        Default: "nulls_are_small"
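The parser arguments that remain documented above are error handling only; a hedged sketch of how they are passed through the public API (argument names come from the docstring above, the malformed SQL is arbitrary):

    import sqlglot
    from sqlglot.errors import ErrorLevel, ParseError

    try:
        sqlglot.parse_one(
            "SELECT FROM WHERE",           # deliberately malformed
            read="tsql",
            error_level=ErrorLevel.RAISE,  # collect all errors, then raise once
        )
    except ParseError as error:
        # Each recorded error carries the context window whose size is
        # controlled by error_message_context (default 100 characters).
        print(error.errors[:1])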
    @@ -1390,49 +1383,34 @@ Default: "nulls_are_small"
-Generator interprets the given syntax tree and produces a SQL string as an output.
+Generator converts a given syntax tree to the corresponding SQL string.

 Arguments:
-    time_mapping (dict): the dictionary of custom time mappings in which the key
-        represents a python time format and the output the target time format
-    time_trie (trie): a trie of the time_mapping keys
-    pretty (bool): if set to True the returned string will be formatted. Default: False.
-    quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
-    quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
-    identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
-    identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
-    bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
-    bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
-    hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
-    hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
-    byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
-    byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
-    raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
-    raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
-    identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
-    normalize (bool): if set to True all identifiers will lower cased
-    string_escape (str): specifies a string escape character. Default: '.
-    identifier_escape (str): specifies an identifier escape character. Default: ".
-    pad (int): determines padding in a formatted string. Default: 2.
-    indent (int): determines the size of indentation in a formatted string. Default: 4.
-    unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
-    normalize_functions (str): normalize function names, "upper", "lower", or None
-        Default: "upper"
-    alias_post_tablesample (bool): if the table alias comes after tablesample
-        Default: False
-    identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit
-        Default: False
-    unsupported_level (ErrorLevel): determines the generator's behavior when it encounters
-        unsupported expressions. Default ErrorLevel.WARN.
-    null_ordering (str): Indicates the default null ordering method to use if not explicitly set.
-        Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
-        Default: "nulls_are_small"
-    max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
+    pretty: Whether or not to format the produced SQL string.
+        Default: False.
+    identify: Determines when an identifier should be quoted. Possible values are:
+        False (default): Never quote, except in cases where it's mandatory by the dialect.
+        True or 'always': Always quote.
+        'safe': Only quote identifiers that are case insensitive.
+    normalize: Whether or not to normalize identifiers to lowercase.
+        Default: False.
+    pad: Determines the pad size in a formatted string.
+        Default: 2.
+    indent: Determines the indentation size in a formatted string.
+        Default: 2.
+    normalize_functions: Whether or not to normalize all function names. Possible values are:
+        "upper" or True (default): Convert names to uppercase.
+        "lower": Convert names to lowercase.
+        False: Disables function name normalization.
+    unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.
+        Default ErrorLevel.WARN.
+    max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.
+        This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
-    leading_comma (bool): if the the comma is leading or trailing in select statements
+    leading_comma: Determines whether or not the comma is leading or trailing in select expressions.
+        This is only relevant when generating in pretty mode. Default: False
     max_text_width: The max number of characters in a segment before creating new lines in pretty mode.
         The default is on the smaller end because the length only represents a segment and not the true
@@ -1546,6 +1524,7 @@ Default: True
    • notnullcolumnconstraint_sql
      primarykeycolumnconstraint_sql
      uniquecolumnconstraint_sql
      +
      createable_sql
      create_sql
      clone_sql
      describe_sql
      @@ -1627,10 +1606,12 @@ Default: True
      ordered_sql
      matchrecognize_sql
      query_modifiers
      +
      offset_limit_modifiers
      after_having_modifiers
      after_limit_modifiers
      select_sql
      schema_sql
      +
      schema_columns_sql
      star_sql
      parameter_sql
      sessionparameter_sql
      @@ -1655,7 +1636,7 @@ Default: True
      nextvaluefor_sql
      extract_sql
      trim_sql
      -
      concat_sql
      +
      safeconcat_sql
      check_sql
      foreignkey_sql
      primarykey_sql
      @@ -1706,6 +1687,7 @@ Default: True
      respectnulls_sql
      intdiv_sql
      dpipe_sql
      +
      safedpipe_sql
      div_sql
      overlaps_sql
      distance_sql
      @@ -1754,6 +1736,7 @@ Default: True
      dictproperty_sql
      dictrange_sql
      dictsubproperty_sql
      +
      oncluster_sql
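To make the trimmed-down argument list documented above concrete, here is a short sketch of passing those options through sqlglot.transpile, which forwards keyword arguments to the Generator (the sample SQL and dialects are arbitrary, and the exact formatted output may differ between versions):

    import sqlglot

    sql = "select a, b as C from MY_TABLE where A > 1"

    print(
        sqlglot.transpile(
            sql,
            read="tsql",
            write="duckdb",
            pretty=True,                  # multi-line, indented output
            identify=True,                # quote every identifier
            normalize=True,               # lowercase identifiers first
            normalize_functions="upper",  # canonical function-name casing
        )[0]
    )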
diff --git a/docs/sqlglot/executor/env.html b/docs/sqlglot/executor/env.html
index f768f47..04965a6 100644
--- a/docs/sqlglot/executor/env.html
+++ b/docs/sqlglot/executor/env.html
@@ -236,15 +236,15 @@
 151        "CAST": cast,
 152        "COALESCE": lambda *args: next((a for a in args if a is not None), None),
 153        "CONCAT": null_if_any(lambda *args: "".join(args)),
-154        "CONCATWS": null_if_any(lambda this, *args: this.join(args)),
-155        "DATESTRTODATE": null_if_any(lambda arg: datetime.date.fromisoformat(arg)),
-156        "DIV": null_if_any(lambda e, this: e / this),
-157        "DOT": null_if_any(lambda e, this: e[this]),
-158        "EQ": null_if_any(lambda this, e: this == e),
-159        "EXTRACT": null_if_any(lambda this, e: getattr(e, this)),
-160        "GT": null_if_any(lambda this, e: this > e),
-161        "GTE": null_if_any(lambda this, e: this >= e),
-162        "IFNULL": lambda e, alt: alt if e is None else e,
+154        "SAFECONCAT": null_if_any(lambda *args: "".join(str(arg) for arg in args)),
+155        "CONCATWS": null_if_any(lambda this, *args: this.join(args)),
+156        "DATESTRTODATE": null_if_any(lambda arg: datetime.date.fromisoformat(arg)),
+157        "DIV": null_if_any(lambda e, this: e / this),
+158        "DOT": null_if_any(lambda e, this: e[this]),
+159        "EQ": null_if_any(lambda this, e: this == e),
+160        "EXTRACT": null_if_any(lambda this, e: getattr(e, this)),
+161        "GT": null_if_any(lambda this, e: this > e),
+162        "GTE": null_if_any(lambda this, e: this >= e),
 163        "IF": lambda predicate, true, false: true if predicate else false,
 164        "INTDIV": null_if_any(lambda e, this: e // this),
 165        "INTERVAL": interval,
diff --git a/docs/sqlglot/executor/python.html b/docs/sqlglot/executor/python.html
index c096edf..d54561d 100644
--- a/docs/sqlglot/executor/python.html
+++ b/docs/sqlglot/executor/python.html
@@ -521,7 +521,7 @@
 394        names = {e.name.lower() for e in e.expressions}
 395
 396        e = e.transform(
-397            lambda n: exp.Var(this=n.name)
+397            lambda n: exp.var(n.name)
 398            if isinstance(n, exp.Identifier) and n.name.lower() in names
 399            else n
 400        )
@@ -1578,6 +1578,7 @@
@@ -1621,49 +1622,34 @@
[docs/sqlglot/executor/python.html then repeats, verbatim, the Generator docstring change shown above (the old time_mapping/quote/identifier/escape keyword arguments replaced by the new pretty, identify, normalize, pad, indent, normalize_functions, unsupported_level, max_unsupported, leading_comma and max_text_width arguments), along with the same additions to the generator method index (safeconcat_sql, safedpipe_sql, oncluster_sql).]
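In the executor table from the env.html hunk above, the new SAFECONCAT handler sits next to CONCAT: both are wrapped in null_if_any, so NULL inputs yield NULL, but SAFECONCAT also stringifies non-string arguments. A minimal sketch, using an illustrative stand-in for the null_if_any wrapper rather than the library's exact implementation:

    from functools import wraps

    def null_if_any(func):
        # Illustrative stand-in: return NULL (None) when any argument is NULL,
        # mirroring the semantics used in sqlglot.executor.env.
        @wraps(func)
        def _func(*args):
            if any(arg is None for arg in args):
                return None
            return func(*args)
        return _func

    CONCAT = null_if_any(lambda *args: "".join(args))                          # expects strings
    SAFECONCAT = null_if_any(lambda *args: "".join(str(arg) for arg in args))  # coerces to str

    print(CONCAT("a", "b"))       # ab
    print(SAFECONCAT("a", 1))     # a1   (non-strings are stringified)
    print(SAFECONCAT("a", None))  # None (NULL still propagates)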
diff --git a/docs/sqlglot/expressions.html b/docs/sqlglot/expressions.html
index 8ea59aa..82b8c21 100644
--- a/docs/sqlglot/expressions.html
+++ b/docs/sqlglot/expressions.html
@@ -856,6 +856,12 @@
    + +
  • + ToTableProperty +
      +
    +
  • ExecuteAsProperty @@ -928,6 +934,12 @@
    +
  • +
  • + OnCluster +
      +
    +
  • LikeProperty @@ -1292,6 +1304,9 @@
  • lock
  • +
  • + hint +
  • is_star
  • @@ -1433,6 +1448,9 @@
  • DATETIME64
  • +
  • + ENUM +
  • INT4RANGE
  • @@ -1553,6 +1571,9 @@
  • SERIAL
  • +
  • + SET +
  • SMALLINT
  • @@ -1784,6 +1805,9 @@
  • Dot
      +
    • + output_name +
    • build
    • @@ -1795,6 +1819,12 @@
      + +
    • + SafeDPipe +
        +
      +
    • EQ @@ -1961,6 +1991,9 @@
    • Paren
    • @@ -2257,6 +2290,12 @@
      + +
    • + SafeConcat +
        +
      +
    • ConcatWs @@ -2461,6 +2500,12 @@
      +
    • +
    • + Date +
        +
      +
    • Day @@ -2539,12 +2584,6 @@
      -
    • -
    • - IfNull -
        -
      -
    • Initcap @@ -2911,6 +2950,12 @@
      +
    • +
    • + FromBase +
        +
      +
    • Struct @@ -4748,4317 +4793,4377 @@ SQL expressions, such as sqlglot.expressions.select< 1500 arg_types = { 1501 "this": False, 1502 "table": False, -1503 "where": False, -1504 "columns": False, -1505 "unique": False, -1506 "primary": False, -1507 "amp": False, # teradata -1508 "partition_by": False, # teradata -1509 } -1510 +1503 "using": False, +1504 "where": False, +1505 "columns": False, +1506 "unique": False, +1507 "primary": False, +1508 "amp": False, # teradata +1509 "partition_by": False, # teradata +1510 } 1511 -1512class Insert(Expression): -1513 arg_types = { -1514 "with": False, -1515 "this": True, -1516 "expression": False, -1517 "conflict": False, -1518 "returning": False, -1519 "overwrite": False, -1520 "exists": False, -1521 "partition": False, -1522 "alternative": False, -1523 } -1524 -1525 def with_( -1526 self, -1527 alias: ExpOrStr, -1528 as_: ExpOrStr, -1529 recursive: t.Optional[bool] = None, -1530 append: bool = True, -1531 dialect: DialectType = None, -1532 copy: bool = True, -1533 **opts, -1534 ) -> Insert: -1535 """ -1536 Append to or set the common table expressions. -1537 -1538 Example: -1539 >>> insert("SELECT x FROM cte", "t").with_("cte", as_="SELECT * FROM tbl").sql() -1540 'WITH cte AS (SELECT * FROM tbl) INSERT INTO t SELECT x FROM cte' -1541 -1542 Args: -1543 alias: the SQL code string to parse as the table name. -1544 If an `Expression` instance is passed, this is used as-is. -1545 as_: the SQL code string to parse as the table expression. -1546 If an `Expression` instance is passed, it will be used as-is. -1547 recursive: set the RECURSIVE part of the expression. Defaults to `False`. -1548 append: if `True`, add to any existing expressions. -1549 Otherwise, this resets the expressions. -1550 dialect: the dialect used to parse the input expression. -1551 copy: if `False`, modify this expression instance in-place. -1552 opts: other options to use to parse the input expressions. -1553 -1554 Returns: -1555 The modified expression. -1556 """ -1557 return _apply_cte_builder( -1558 self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts -1559 ) -1560 +1512 +1513class Insert(Expression): +1514 arg_types = { +1515 "with": False, +1516 "this": True, +1517 "expression": False, +1518 "conflict": False, +1519 "returning": False, +1520 "overwrite": False, +1521 "exists": False, +1522 "partition": False, +1523 "alternative": False, +1524 } +1525 +1526 def with_( +1527 self, +1528 alias: ExpOrStr, +1529 as_: ExpOrStr, +1530 recursive: t.Optional[bool] = None, +1531 append: bool = True, +1532 dialect: DialectType = None, +1533 copy: bool = True, +1534 **opts, +1535 ) -> Insert: +1536 """ +1537 Append to or set the common table expressions. +1538 +1539 Example: +1540 >>> insert("SELECT x FROM cte", "t").with_("cte", as_="SELECT * FROM tbl").sql() +1541 'WITH cte AS (SELECT * FROM tbl) INSERT INTO t SELECT x FROM cte' +1542 +1543 Args: +1544 alias: the SQL code string to parse as the table name. +1545 If an `Expression` instance is passed, this is used as-is. +1546 as_: the SQL code string to parse as the table expression. +1547 If an `Expression` instance is passed, it will be used as-is. +1548 recursive: set the RECURSIVE part of the expression. Defaults to `False`. +1549 append: if `True`, add to any existing expressions. +1550 Otherwise, this resets the expressions. +1551 dialect: the dialect used to parse the input expression. +1552 copy: if `False`, modify this expression instance in-place. 
+1553 opts: other options to use to parse the input expressions. +1554 +1555 Returns: +1556 The modified expression. +1557 """ +1558 return _apply_cte_builder( +1559 self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts +1560 ) 1561 -1562class OnConflict(Expression): -1563 arg_types = { -1564 "duplicate": False, -1565 "expressions": False, -1566 "nothing": False, -1567 "key": False, -1568 "constraint": False, -1569 } -1570 +1562 +1563class OnConflict(Expression): +1564 arg_types = { +1565 "duplicate": False, +1566 "expressions": False, +1567 "nothing": False, +1568 "key": False, +1569 "constraint": False, +1570 } 1571 -1572class Returning(Expression): -1573 arg_types = {"expressions": True} -1574 +1572 +1573class Returning(Expression): +1574 arg_types = {"expressions": True} 1575 -1576# https://dev.mysql.com/doc/refman/8.0/en/charset-introducer.html -1577class Introducer(Expression): -1578 arg_types = {"this": True, "expression": True} -1579 +1576 +1577# https://dev.mysql.com/doc/refman/8.0/en/charset-introducer.html +1578class Introducer(Expression): +1579 arg_types = {"this": True, "expression": True} 1580 -1581# national char, like n'utf8' -1582class National(Expression): -1583 pass -1584 +1581 +1582# national char, like n'utf8' +1583class National(Expression): +1584 pass 1585 -1586class LoadData(Expression): -1587 arg_types = { -1588 "this": True, -1589 "local": False, -1590 "overwrite": False, -1591 "inpath": True, -1592 "partition": False, -1593 "input_format": False, -1594 "serde": False, -1595 } -1596 +1586 +1587class LoadData(Expression): +1588 arg_types = { +1589 "this": True, +1590 "local": False, +1591 "overwrite": False, +1592 "inpath": True, +1593 "partition": False, +1594 "input_format": False, +1595 "serde": False, +1596 } 1597 -1598class Partition(Expression): -1599 arg_types = {"expressions": True} -1600 +1598 +1599class Partition(Expression): +1600 arg_types = {"expressions": True} 1601 -1602class Fetch(Expression): -1603 arg_types = { -1604 "direction": False, -1605 "count": False, -1606 "percent": False, -1607 "with_ties": False, -1608 } -1609 +1602 +1603class Fetch(Expression): +1604 arg_types = { +1605 "direction": False, +1606 "count": False, +1607 "percent": False, +1608 "with_ties": False, +1609 } 1610 -1611class Group(Expression): -1612 arg_types = { -1613 "expressions": False, -1614 "grouping_sets": False, -1615 "cube": False, -1616 "rollup": False, -1617 "totals": False, -1618 } -1619 +1611 +1612class Group(Expression): +1613 arg_types = { +1614 "expressions": False, +1615 "grouping_sets": False, +1616 "cube": False, +1617 "rollup": False, +1618 "totals": False, +1619 } 1620 -1621class Lambda(Expression): -1622 arg_types = {"this": True, "expressions": True} -1623 +1621 +1622class Lambda(Expression): +1623 arg_types = {"this": True, "expressions": True} 1624 -1625class Limit(Expression): -1626 arg_types = {"this": False, "expression": True} -1627 +1625 +1626class Limit(Expression): +1627 arg_types = {"this": False, "expression": True, "offset": False} 1628 -1629class Literal(Condition): -1630 arg_types = {"this": True, "is_string": True} -1631 -1632 @property -1633 def hashable_args(self) -> t.Any: -1634 return (self.this, self.args.get("is_string")) -1635 -1636 @classmethod -1637 def number(cls, number) -> Literal: -1638 return cls(this=str(number), is_string=False) -1639 -1640 @classmethod -1641 def string(cls, string) -> Literal: -1642 return cls(this=str(string), is_string=True) -1643 -1644 @property -1645 def 
output_name(self) -> str: -1646 return self.name -1647 +1629 +1630class Literal(Condition): +1631 arg_types = {"this": True, "is_string": True} +1632 +1633 @property +1634 def hashable_args(self) -> t.Any: +1635 return (self.this, self.args.get("is_string")) +1636 +1637 @classmethod +1638 def number(cls, number) -> Literal: +1639 return cls(this=str(number), is_string=False) +1640 +1641 @classmethod +1642 def string(cls, string) -> Literal: +1643 return cls(this=str(string), is_string=True) +1644 +1645 @property +1646 def output_name(self) -> str: +1647 return self.name 1648 -1649class Join(Expression): -1650 arg_types = { -1651 "this": True, -1652 "on": False, -1653 "side": False, -1654 "kind": False, -1655 "using": False, -1656 "method": False, -1657 "global": False, -1658 "hint": False, -1659 } -1660 -1661 @property -1662 def method(self) -> str: -1663 return self.text("method").upper() -1664 -1665 @property -1666 def kind(self) -> str: -1667 return self.text("kind").upper() -1668 -1669 @property -1670 def side(self) -> str: -1671 return self.text("side").upper() -1672 -1673 @property -1674 def hint(self) -> str: -1675 return self.text("hint").upper() -1676 -1677 @property -1678 def alias_or_name(self) -> str: -1679 return self.this.alias_or_name -1680 -1681 def on( -1682 self, -1683 *expressions: t.Optional[ExpOrStr], -1684 append: bool = True, -1685 dialect: DialectType = None, -1686 copy: bool = True, -1687 **opts, -1688 ) -> Join: -1689 """ -1690 Append to or set the ON expressions. -1691 -1692 Example: -1693 >>> import sqlglot -1694 >>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql() -1695 'JOIN x ON y = 1' -1696 -1697 Args: -1698 *expressions: the SQL code strings to parse. -1699 If an `Expression` instance is passed, it will be used as-is. -1700 Multiple expressions are combined with an AND operator. -1701 append: if `True`, AND the new expressions to any existing expression. -1702 Otherwise, this resets the expression. -1703 dialect: the dialect used to parse the input expressions. -1704 copy: if `False`, modify this expression instance in-place. -1705 opts: other options to use to parse the input expressions. -1706 -1707 Returns: -1708 The modified Join expression. -1709 """ -1710 join = _apply_conjunction_builder( -1711 *expressions, -1712 instance=self, -1713 arg="on", -1714 append=append, -1715 dialect=dialect, -1716 copy=copy, -1717 **opts, -1718 ) -1719 -1720 if join.kind == "CROSS": -1721 join.set("kind", None) -1722 -1723 return join -1724 -1725 def using( -1726 self, -1727 *expressions: t.Optional[ExpOrStr], -1728 append: bool = True, -1729 dialect: DialectType = None, -1730 copy: bool = True, -1731 **opts, -1732 ) -> Join: -1733 """ -1734 Append to or set the USING expressions. -1735 -1736 Example: -1737 >>> import sqlglot -1738 >>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql() -1739 'JOIN x USING (foo, bla)' -1740 -1741 Args: -1742 *expressions: the SQL code strings to parse. -1743 If an `Expression` instance is passed, it will be used as-is. -1744 append: if `True`, concatenate the new expressions to the existing "using" list. -1745 Otherwise, this resets the expression. -1746 dialect: the dialect used to parse the input expressions. -1747 copy: if `False`, modify this expression instance in-place. -1748 opts: other options to use to parse the input expressions. -1749 -1750 Returns: -1751 The modified Join expression. 
-1752 """ -1753 join = _apply_list_builder( -1754 *expressions, -1755 instance=self, -1756 arg="using", -1757 append=append, -1758 dialect=dialect, -1759 copy=copy, -1760 **opts, -1761 ) -1762 -1763 if join.kind == "CROSS": -1764 join.set("kind", None) -1765 -1766 return join -1767 +1649 +1650class Join(Expression): +1651 arg_types = { +1652 "this": True, +1653 "on": False, +1654 "side": False, +1655 "kind": False, +1656 "using": False, +1657 "method": False, +1658 "global": False, +1659 "hint": False, +1660 } +1661 +1662 @property +1663 def method(self) -> str: +1664 return self.text("method").upper() +1665 +1666 @property +1667 def kind(self) -> str: +1668 return self.text("kind").upper() +1669 +1670 @property +1671 def side(self) -> str: +1672 return self.text("side").upper() +1673 +1674 @property +1675 def hint(self) -> str: +1676 return self.text("hint").upper() +1677 +1678 @property +1679 def alias_or_name(self) -> str: +1680 return self.this.alias_or_name +1681 +1682 def on( +1683 self, +1684 *expressions: t.Optional[ExpOrStr], +1685 append: bool = True, +1686 dialect: DialectType = None, +1687 copy: bool = True, +1688 **opts, +1689 ) -> Join: +1690 """ +1691 Append to or set the ON expressions. +1692 +1693 Example: +1694 >>> import sqlglot +1695 >>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql() +1696 'JOIN x ON y = 1' +1697 +1698 Args: +1699 *expressions: the SQL code strings to parse. +1700 If an `Expression` instance is passed, it will be used as-is. +1701 Multiple expressions are combined with an AND operator. +1702 append: if `True`, AND the new expressions to any existing expression. +1703 Otherwise, this resets the expression. +1704 dialect: the dialect used to parse the input expressions. +1705 copy: if `False`, modify this expression instance in-place. +1706 opts: other options to use to parse the input expressions. +1707 +1708 Returns: +1709 The modified Join expression. +1710 """ +1711 join = _apply_conjunction_builder( +1712 *expressions, +1713 instance=self, +1714 arg="on", +1715 append=append, +1716 dialect=dialect, +1717 copy=copy, +1718 **opts, +1719 ) +1720 +1721 if join.kind == "CROSS": +1722 join.set("kind", None) +1723 +1724 return join +1725 +1726 def using( +1727 self, +1728 *expressions: t.Optional[ExpOrStr], +1729 append: bool = True, +1730 dialect: DialectType = None, +1731 copy: bool = True, +1732 **opts, +1733 ) -> Join: +1734 """ +1735 Append to or set the USING expressions. +1736 +1737 Example: +1738 >>> import sqlglot +1739 >>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql() +1740 'JOIN x USING (foo, bla)' +1741 +1742 Args: +1743 *expressions: the SQL code strings to parse. +1744 If an `Expression` instance is passed, it will be used as-is. +1745 append: if `True`, concatenate the new expressions to the existing "using" list. +1746 Otherwise, this resets the expression. +1747 dialect: the dialect used to parse the input expressions. +1748 copy: if `False`, modify this expression instance in-place. +1749 opts: other options to use to parse the input expressions. +1750 +1751 Returns: +1752 The modified Join expression. 
+1753 """ +1754 join = _apply_list_builder( +1755 *expressions, +1756 instance=self, +1757 arg="using", +1758 append=append, +1759 dialect=dialect, +1760 copy=copy, +1761 **opts, +1762 ) +1763 +1764 if join.kind == "CROSS": +1765 join.set("kind", None) +1766 +1767 return join 1768 -1769class Lateral(UDTF): -1770 arg_types = {"this": True, "view": False, "outer": False, "alias": False} -1771 +1769 +1770class Lateral(UDTF): +1771 arg_types = {"this": True, "view": False, "outer": False, "alias": False} 1772 -1773class MatchRecognize(Expression): -1774 arg_types = { -1775 "partition_by": False, -1776 "order": False, -1777 "measures": False, -1778 "rows": False, -1779 "after": False, -1780 "pattern": False, -1781 "define": False, -1782 "alias": False, -1783 } -1784 +1773 +1774class MatchRecognize(Expression): +1775 arg_types = { +1776 "partition_by": False, +1777 "order": False, +1778 "measures": False, +1779 "rows": False, +1780 "after": False, +1781 "pattern": False, +1782 "define": False, +1783 "alias": False, +1784 } 1785 -1786# Clickhouse FROM FINAL modifier -1787# https://clickhouse.com/docs/en/sql-reference/statements/select/from/#final-modifier -1788class Final(Expression): -1789 pass -1790 +1786 +1787# Clickhouse FROM FINAL modifier +1788# https://clickhouse.com/docs/en/sql-reference/statements/select/from/#final-modifier +1789class Final(Expression): +1790 pass 1791 -1792class Offset(Expression): -1793 arg_types = {"this": False, "expression": True} -1794 +1792 +1793class Offset(Expression): +1794 arg_types = {"this": False, "expression": True} 1795 -1796class Order(Expression): -1797 arg_types = {"this": False, "expressions": True} -1798 +1796 +1797class Order(Expression): +1798 arg_types = {"this": False, "expressions": True} 1799 -1800# hive specific sorts -1801# https://cwiki.apache.org/confluence/display/Hive/LanguageManual+SortBy -1802class Cluster(Order): -1803 pass -1804 +1800 +1801# hive specific sorts +1802# https://cwiki.apache.org/confluence/display/Hive/LanguageManual+SortBy +1803class Cluster(Order): +1804 pass 1805 -1806class Distribute(Order): -1807 pass -1808 +1806 +1807class Distribute(Order): +1808 pass 1809 -1810class Sort(Order): -1811 pass -1812 +1810 +1811class Sort(Order): +1812 pass 1813 -1814class Ordered(Expression): -1815 arg_types = {"this": True, "desc": True, "nulls_first": True} -1816 +1814 +1815class Ordered(Expression): +1816 arg_types = {"this": True, "desc": True, "nulls_first": True} 1817 -1818class Property(Expression): -1819 arg_types = {"this": True, "value": True} -1820 +1818 +1819class Property(Expression): +1820 arg_types = {"this": True, "value": True} 1821 -1822class AlgorithmProperty(Property): -1823 arg_types = {"this": True} -1824 +1822 +1823class AlgorithmProperty(Property): +1824 arg_types = {"this": True} 1825 -1826class AutoIncrementProperty(Property): -1827 arg_types = {"this": True} -1828 +1826 +1827class AutoIncrementProperty(Property): +1828 arg_types = {"this": True} 1829 -1830class BlockCompressionProperty(Property): -1831 arg_types = {"autotemp": False, "always": False, "default": True, "manual": True, "never": True} -1832 +1830 +1831class BlockCompressionProperty(Property): +1832 arg_types = {"autotemp": False, "always": False, "default": True, "manual": True, "never": True} 1833 -1834class CharacterSetProperty(Property): -1835 arg_types = {"this": True, "default": True} -1836 +1834 +1835class CharacterSetProperty(Property): +1836 arg_types = {"this": True, "default": True} 1837 -1838class ChecksumProperty(Property): -1839 
arg_types = {"on": False, "default": False} -1840 +1838 +1839class ChecksumProperty(Property): +1840 arg_types = {"on": False, "default": False} 1841 -1842class CollateProperty(Property): -1843 arg_types = {"this": True} -1844 +1842 +1843class CollateProperty(Property): +1844 arg_types = {"this": True} 1845 -1846class DataBlocksizeProperty(Property): -1847 arg_types = { -1848 "size": False, -1849 "units": False, -1850 "minimum": False, -1851 "maximum": False, -1852 "default": False, -1853 } -1854 +1846 +1847class DataBlocksizeProperty(Property): +1848 arg_types = { +1849 "size": False, +1850 "units": False, +1851 "minimum": False, +1852 "maximum": False, +1853 "default": False, +1854 } 1855 -1856class DefinerProperty(Property): -1857 arg_types = {"this": True} -1858 +1856 +1857class DefinerProperty(Property): +1858 arg_types = {"this": True} 1859 -1860class DistKeyProperty(Property): -1861 arg_types = {"this": True} -1862 +1860 +1861class DistKeyProperty(Property): +1862 arg_types = {"this": True} 1863 -1864class DistStyleProperty(Property): -1865 arg_types = {"this": True} -1866 +1864 +1865class DistStyleProperty(Property): +1866 arg_types = {"this": True} 1867 -1868class EngineProperty(Property): -1869 arg_types = {"this": True} -1870 +1868 +1869class EngineProperty(Property): +1870 arg_types = {"this": True} 1871 -1872class ExecuteAsProperty(Property): -1873 arg_types = {"this": True} -1874 +1872 +1873class ToTableProperty(Property): +1874 arg_types = {"this": True} 1875 -1876class ExternalProperty(Property): -1877 arg_types = {"this": False} -1878 +1876 +1877class ExecuteAsProperty(Property): +1878 arg_types = {"this": True} 1879 -1880class FallbackProperty(Property): -1881 arg_types = {"no": True, "protection": False} -1882 +1880 +1881class ExternalProperty(Property): +1882 arg_types = {"this": False} 1883 -1884class FileFormatProperty(Property): -1885 arg_types = {"this": True} -1886 +1884 +1885class FallbackProperty(Property): +1886 arg_types = {"no": True, "protection": False} 1887 -1888class FreespaceProperty(Property): -1889 arg_types = {"this": True, "percent": False} -1890 +1888 +1889class FileFormatProperty(Property): +1890 arg_types = {"this": True} 1891 -1892class InputOutputFormat(Expression): -1893 arg_types = {"input_format": False, "output_format": False} -1894 +1892 +1893class FreespaceProperty(Property): +1894 arg_types = {"this": True, "percent": False} 1895 -1896class IsolatedLoadingProperty(Property): -1897 arg_types = { -1898 "no": True, -1899 "concurrent": True, -1900 "for_all": True, -1901 "for_insert": True, -1902 "for_none": True, -1903 } -1904 -1905 -1906class JournalProperty(Property): -1907 arg_types = { -1908 "no": False, -1909 "dual": False, -1910 "before": False, -1911 "local": False, -1912 "after": False, -1913 } -1914 -1915 -1916class LanguageProperty(Property): -1917 arg_types = {"this": True} -1918 +1896 +1897class InputOutputFormat(Expression): +1898 arg_types = {"input_format": False, "output_format": False} +1899 +1900 +1901class IsolatedLoadingProperty(Property): +1902 arg_types = { +1903 "no": True, +1904 "concurrent": True, +1905 "for_all": True, +1906 "for_insert": True, +1907 "for_none": True, +1908 } +1909 +1910 +1911class JournalProperty(Property): +1912 arg_types = { +1913 "no": False, +1914 "dual": False, +1915 "before": False, +1916 "local": False, +1917 "after": False, +1918 } 1919 -1920class DictProperty(Property): -1921 arg_types = {"this": True, "kind": True, "settings": False} -1922 +1920 +1921class LanguageProperty(Property): +1922 
arg_types = {"this": True} 1923 -1924class DictSubProperty(Property): -1925 pass -1926 +1924 +1925class DictProperty(Property): +1926 arg_types = {"this": True, "kind": True, "settings": False} 1927 -1928class DictRange(Property): -1929 arg_types = {"this": True, "min": True, "max": True} -1930 +1928 +1929class DictSubProperty(Property): +1930 pass 1931 -1932class LikeProperty(Property): -1933 arg_types = {"this": True, "expressions": False} -1934 +1932 +1933class DictRange(Property): +1934 arg_types = {"this": True, "min": True, "max": True} 1935 -1936class LocationProperty(Property): -1937 arg_types = {"this": True} -1938 -1939 -1940class LockingProperty(Property): -1941 arg_types = { -1942 "this": False, -1943 "kind": True, -1944 "for_or_in": True, -1945 "lock_type": True, -1946 "override": False, -1947 } -1948 +1936 +1937# Clickhouse CREATE ... ON CLUSTER modifier +1938# https://clickhouse.com/docs/en/sql-reference/distributed-ddl +1939class OnCluster(Property): +1940 arg_types = {"this": True} +1941 +1942 +1943class LikeProperty(Property): +1944 arg_types = {"this": True, "expressions": False} +1945 +1946 +1947class LocationProperty(Property): +1948 arg_types = {"this": True} 1949 -1950class LogProperty(Property): -1951 arg_types = {"no": True} -1952 -1953 -1954class MaterializedProperty(Property): -1955 arg_types = {"this": False} -1956 -1957 -1958class MergeBlockRatioProperty(Property): -1959 arg_types = {"this": False, "no": False, "default": False, "percent": False} +1950 +1951class LockingProperty(Property): +1952 arg_types = { +1953 "this": False, +1954 "kind": True, +1955 "for_or_in": True, +1956 "lock_type": True, +1957 "override": False, +1958 } +1959 1960 -1961 -1962class NoPrimaryIndexProperty(Property): -1963 arg_types = {} +1961class LogProperty(Property): +1962 arg_types = {"no": True} +1963 1964 -1965 -1966class OnCommitProperty(Property): -1967 arg_type = {"delete": False} +1965class MaterializedProperty(Property): +1966 arg_types = {"this": False} +1967 1968 -1969 -1970class PartitionedByProperty(Property): -1971 arg_types = {"this": True} +1969class MergeBlockRatioProperty(Property): +1970 arg_types = {"this": False, "no": False, "default": False, "percent": False} +1971 1972 -1973 -1974class ReturnsProperty(Property): -1975 arg_types = {"this": True, "is_table": False, "table": False} +1973class NoPrimaryIndexProperty(Property): +1974 arg_types = {} +1975 1976 -1977 -1978class RowFormatProperty(Property): -1979 arg_types = {"this": True} +1977class OnCommitProperty(Property): +1978 arg_type = {"delete": False} +1979 1980 -1981 -1982class RowFormatDelimitedProperty(Property): -1983 # https://cwiki.apache.org/confluence/display/hive/languagemanual+dml -1984 arg_types = { -1985 "fields": False, -1986 "escaped": False, -1987 "collection_items": False, -1988 "map_keys": False, -1989 "lines": False, -1990 "null": False, -1991 "serde": False, -1992 } -1993 -1994 -1995class RowFormatSerdeProperty(Property): -1996 arg_types = {"this": True} -1997 -1998 -1999class SchemaCommentProperty(Property): -2000 arg_types = {"this": True} -2001 -2002 -2003class SerdeProperties(Property): -2004 arg_types = {"expressions": True} +1981class PartitionedByProperty(Property): +1982 arg_types = {"this": True} +1983 +1984 +1985class ReturnsProperty(Property): +1986 arg_types = {"this": True, "is_table": False, "table": False} +1987 +1988 +1989class RowFormatProperty(Property): +1990 arg_types = {"this": True} +1991 +1992 +1993class RowFormatDelimitedProperty(Property): +1994 # 
https://cwiki.apache.org/confluence/display/hive/languagemanual+dml +1995 arg_types = { +1996 "fields": False, +1997 "escaped": False, +1998 "collection_items": False, +1999 "map_keys": False, +2000 "lines": False, +2001 "null": False, +2002 "serde": False, +2003 } +2004 2005 -2006 -2007class SetProperty(Property): -2008 arg_types = {"multi": True} +2006class RowFormatSerdeProperty(Property): +2007 arg_types = {"this": True} +2008 2009 -2010 -2011class SettingsProperty(Property): -2012 arg_types = {"expressions": True} +2010class SchemaCommentProperty(Property): +2011 arg_types = {"this": True} +2012 2013 -2014 -2015class SortKeyProperty(Property): -2016 arg_types = {"this": True, "compound": False} +2014class SerdeProperties(Property): +2015 arg_types = {"expressions": True} +2016 2017 -2018 -2019class SqlSecurityProperty(Property): -2020 arg_types = {"definer": True} +2018class SetProperty(Property): +2019 arg_types = {"multi": True} +2020 2021 -2022 -2023class StabilityProperty(Property): -2024 arg_types = {"this": True} +2022class SettingsProperty(Property): +2023 arg_types = {"expressions": True} +2024 2025 -2026 -2027class TemporaryProperty(Property): -2028 arg_types = {} +2026class SortKeyProperty(Property): +2027 arg_types = {"this": True, "compound": False} +2028 2029 -2030 -2031class TransientProperty(Property): -2032 arg_types = {"this": False} +2030class SqlSecurityProperty(Property): +2031 arg_types = {"definer": True} +2032 2033 -2034 -2035class VolatileProperty(Property): -2036 arg_types = {"this": False} +2034class StabilityProperty(Property): +2035 arg_types = {"this": True} +2036 2037 -2038 -2039class WithDataProperty(Property): -2040 arg_types = {"no": True, "statistics": False} +2038class TemporaryProperty(Property): +2039 arg_types = {} +2040 2041 -2042 -2043class WithJournalTableProperty(Property): -2044 arg_types = {"this": True} +2042class TransientProperty(Property): +2043 arg_types = {"this": False} +2044 2045 -2046 -2047class Properties(Expression): -2048 arg_types = {"expressions": True} +2046class VolatileProperty(Property): +2047 arg_types = {"this": False} +2048 2049 -2050 NAME_TO_PROPERTY = { -2051 "ALGORITHM": AlgorithmProperty, -2052 "AUTO_INCREMENT": AutoIncrementProperty, -2053 "CHARACTER SET": CharacterSetProperty, -2054 "COLLATE": CollateProperty, -2055 "COMMENT": SchemaCommentProperty, -2056 "DEFINER": DefinerProperty, -2057 "DISTKEY": DistKeyProperty, -2058 "DISTSTYLE": DistStyleProperty, -2059 "ENGINE": EngineProperty, -2060 "EXECUTE AS": ExecuteAsProperty, -2061 "FORMAT": FileFormatProperty, -2062 "LANGUAGE": LanguageProperty, -2063 "LOCATION": LocationProperty, -2064 "PARTITIONED_BY": PartitionedByProperty, -2065 "RETURNS": ReturnsProperty, -2066 "ROW_FORMAT": RowFormatProperty, -2067 "SORTKEY": SortKeyProperty, -2068 } -2069 -2070 PROPERTY_TO_NAME = {v: k for k, v in NAME_TO_PROPERTY.items()} -2071 -2072 # CREATE property locations -2073 # Form: schema specified -2074 # create [POST_CREATE] -2075 # table a [POST_NAME] -2076 # (b int) [POST_SCHEMA] -2077 # with ([POST_WITH]) -2078 # index (b) [POST_INDEX] -2079 # -2080 # Form: alias selection -2081 # create [POST_CREATE] -2082 # table a [POST_NAME] -2083 # as [POST_ALIAS] (select * from b) [POST_EXPRESSION] -2084 # index (c) [POST_INDEX] -2085 class Location(AutoName): -2086 POST_CREATE = auto() -2087 POST_NAME = auto() -2088 POST_SCHEMA = auto() -2089 POST_WITH = auto() -2090 POST_ALIAS = auto() -2091 POST_EXPRESSION = auto() -2092 POST_INDEX = auto() -2093 UNSUPPORTED = auto() -2094 -2095 
@classmethod -2096 def from_dict(cls, properties_dict: t.Dict) -> Properties: -2097 expressions = [] -2098 for key, value in properties_dict.items(): -2099 property_cls = cls.NAME_TO_PROPERTY.get(key.upper()) -2100 if property_cls: -2101 expressions.append(property_cls(this=convert(value))) -2102 else: -2103 expressions.append(Property(this=Literal.string(key), value=convert(value))) -2104 -2105 return cls(expressions=expressions) -2106 -2107 -2108class Qualify(Expression): -2109 pass -2110 -2111 -2112# https://www.ibm.com/docs/en/ias?topic=procedures-return-statement-in-sql -2113class Return(Expression): -2114 pass +2050class WithDataProperty(Property): +2051 arg_types = {"no": True, "statistics": False} +2052 +2053 +2054class WithJournalTableProperty(Property): +2055 arg_types = {"this": True} +2056 +2057 +2058class Properties(Expression): +2059 arg_types = {"expressions": True} +2060 +2061 NAME_TO_PROPERTY = { +2062 "ALGORITHM": AlgorithmProperty, +2063 "AUTO_INCREMENT": AutoIncrementProperty, +2064 "CHARACTER SET": CharacterSetProperty, +2065 "COLLATE": CollateProperty, +2066 "COMMENT": SchemaCommentProperty, +2067 "DEFINER": DefinerProperty, +2068 "DISTKEY": DistKeyProperty, +2069 "DISTSTYLE": DistStyleProperty, +2070 "ENGINE": EngineProperty, +2071 "EXECUTE AS": ExecuteAsProperty, +2072 "FORMAT": FileFormatProperty, +2073 "LANGUAGE": LanguageProperty, +2074 "LOCATION": LocationProperty, +2075 "PARTITIONED_BY": PartitionedByProperty, +2076 "RETURNS": ReturnsProperty, +2077 "ROW_FORMAT": RowFormatProperty, +2078 "SORTKEY": SortKeyProperty, +2079 } +2080 +2081 PROPERTY_TO_NAME = {v: k for k, v in NAME_TO_PROPERTY.items()} +2082 +2083 # CREATE property locations +2084 # Form: schema specified +2085 # create [POST_CREATE] +2086 # table a [POST_NAME] +2087 # (b int) [POST_SCHEMA] +2088 # with ([POST_WITH]) +2089 # index (b) [POST_INDEX] +2090 # +2091 # Form: alias selection +2092 # create [POST_CREATE] +2093 # table a [POST_NAME] +2094 # as [POST_ALIAS] (select * from b) [POST_EXPRESSION] +2095 # index (c) [POST_INDEX] +2096 class Location(AutoName): +2097 POST_CREATE = auto() +2098 POST_NAME = auto() +2099 POST_SCHEMA = auto() +2100 POST_WITH = auto() +2101 POST_ALIAS = auto() +2102 POST_EXPRESSION = auto() +2103 POST_INDEX = auto() +2104 UNSUPPORTED = auto() +2105 +2106 @classmethod +2107 def from_dict(cls, properties_dict: t.Dict) -> Properties: +2108 expressions = [] +2109 for key, value in properties_dict.items(): +2110 property_cls = cls.NAME_TO_PROPERTY.get(key.upper()) +2111 if property_cls: +2112 expressions.append(property_cls(this=convert(value))) +2113 else: +2114 expressions.append(Property(this=Literal.string(key), value=convert(value))) 2115 -2116 -2117class Reference(Expression): -2118 arg_types = {"this": True, "expressions": False, "options": False} -2119 -2120 -2121class Tuple(Expression): -2122 arg_types = {"expressions": False} -2123 -2124 def isin( -2125 self, *expressions: t.Any, query: t.Optional[ExpOrStr] = None, copy: bool = True, **opts -2126 ) -> In: -2127 return In( -2128 this=_maybe_copy(self, copy), -2129 expressions=[convert(e, copy=copy) for e in expressions], -2130 query=maybe_parse(query, copy=copy, **opts) if query else None, -2131 ) -2132 -2133 -2134class Subqueryable(Unionable): -2135 def subquery(self, alias: t.Optional[ExpOrStr] = None, copy: bool = True) -> Subquery: -2136 """ -2137 Convert this expression to an aliased expression that can be used as a Subquery. 
-2138 -2139 Example: -2140 >>> subquery = Select().select("x").from_("tbl").subquery() -2141 >>> Select().select("x").from_(subquery).sql() -2142 'SELECT x FROM (SELECT x FROM tbl)' +2116 return cls(expressions=expressions) +2117 +2118 +2119class Qualify(Expression): +2120 pass +2121 +2122 +2123# https://www.ibm.com/docs/en/ias?topic=procedures-return-statement-in-sql +2124class Return(Expression): +2125 pass +2126 +2127 +2128class Reference(Expression): +2129 arg_types = {"this": True, "expressions": False, "options": False} +2130 +2131 +2132class Tuple(Expression): +2133 arg_types = {"expressions": False} +2134 +2135 def isin( +2136 self, *expressions: t.Any, query: t.Optional[ExpOrStr] = None, copy: bool = True, **opts +2137 ) -> In: +2138 return In( +2139 this=_maybe_copy(self, copy), +2140 expressions=[convert(e, copy=copy) for e in expressions], +2141 query=maybe_parse(query, copy=copy, **opts) if query else None, +2142 ) 2143 -2144 Args: -2145 alias (str | Identifier): an optional alias for the subquery -2146 copy (bool): if `False`, modify this expression instance in-place. -2147 -2148 Returns: -2149 Alias: the subquery -2150 """ -2151 instance = _maybe_copy(self, copy) -2152 if not isinstance(alias, Expression): -2153 alias = TableAlias(this=to_identifier(alias)) if alias else None +2144 +2145class Subqueryable(Unionable): +2146 def subquery(self, alias: t.Optional[ExpOrStr] = None, copy: bool = True) -> Subquery: +2147 """ +2148 Convert this expression to an aliased expression that can be used as a Subquery. +2149 +2150 Example: +2151 >>> subquery = Select().select("x").from_("tbl").subquery() +2152 >>> Select().select("x").from_(subquery).sql() +2153 'SELECT x FROM (SELECT x FROM tbl)' 2154 -2155 return Subquery(this=instance, alias=alias) -2156 -2157 def limit( -2158 self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts -2159 ) -> Select: -2160 raise NotImplementedError -2161 -2162 @property -2163 def ctes(self): -2164 with_ = self.args.get("with") -2165 if not with_: -2166 return [] -2167 return with_.expressions -2168 -2169 @property -2170 def selects(self): -2171 raise NotImplementedError("Subqueryable objects must implement `selects`") +2155 Args: +2156 alias (str | Identifier): an optional alias for the subquery +2157 copy (bool): if `False`, modify this expression instance in-place. +2158 +2159 Returns: +2160 Alias: the subquery +2161 """ +2162 instance = _maybe_copy(self, copy) +2163 if not isinstance(alias, Expression): +2164 alias = TableAlias(this=to_identifier(alias)) if alias else None +2165 +2166 return Subquery(this=instance, alias=alias) +2167 +2168 def limit( +2169 self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts +2170 ) -> Select: +2171 raise NotImplementedError 2172 2173 @property -2174 def named_selects(self): -2175 raise NotImplementedError("Subqueryable objects must implement `named_selects`") -2176 -2177 def with_( -2178 self, -2179 alias: ExpOrStr, -2180 as_: ExpOrStr, -2181 recursive: t.Optional[bool] = None, -2182 append: bool = True, -2183 dialect: DialectType = None, -2184 copy: bool = True, -2185 **opts, -2186 ) -> Subqueryable: -2187 """ -2188 Append to or set the common table expressions. -2189 -2190 Example: -2191 >>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql() -2192 'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2' -2193 -2194 Args: -2195 alias: the SQL code string to parse as the table name. 
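The NAME_TO_PROPERTY table and Properties.from_dict above are what turn a plain dict (for example the `properties` argument of Select.ctas further down) into typed Property nodes, with unknown keys falling back to a generic Property. A minimal sketch, assuming sqlglot is importable; "some_key" is just an illustrative key:

    from sqlglot import exp

    props = exp.Properties.from_dict({"FORMAT": "parquet", "some_key": 1})
    # "FORMAT" is in NAME_TO_PROPERTY, "some_key" is not, so it becomes a generic Property
    print([type(p).__name__ for p in props.expressions])
    # expected: ['FileFormatProperty', 'Property']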
-2196 If an `Expression` instance is passed, this is used as-is. -2197 as_: the SQL code string to parse as the table expression. -2198 If an `Expression` instance is passed, it will be used as-is. -2199 recursive: set the RECURSIVE part of the expression. Defaults to `False`. -2200 append: if `True`, add to any existing expressions. -2201 Otherwise, this resets the expressions. -2202 dialect: the dialect used to parse the input expression. -2203 copy: if `False`, modify this expression instance in-place. -2204 opts: other options to use to parse the input expressions. -2205 -2206 Returns: -2207 The modified expression. -2208 """ -2209 return _apply_cte_builder( -2210 self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts -2211 ) -2212 -2213 -2214QUERY_MODIFIERS = { -2215 "match": False, -2216 "laterals": False, -2217 "joins": False, -2218 "pivots": False, -2219 "where": False, -2220 "group": False, -2221 "having": False, -2222 "qualify": False, -2223 "windows": False, -2224 "distribute": False, -2225 "sort": False, -2226 "cluster": False, -2227 "order": False, -2228 "limit": False, -2229 "offset": False, -2230 "locks": False, -2231 "sample": False, -2232 "settings": False, -2233 "format": False, -2234} -2235 -2236 -2237class Table(Expression): -2238 arg_types = { -2239 "this": True, -2240 "alias": False, -2241 "db": False, -2242 "catalog": False, -2243 "laterals": False, -2244 "joins": False, -2245 "pivots": False, -2246 "hints": False, -2247 "system_time": False, -2248 } -2249 -2250 @property -2251 def db(self) -> str: -2252 return self.text("db") -2253 -2254 @property -2255 def catalog(self) -> str: -2256 return self.text("catalog") -2257 -2258 @property -2259 def parts(self) -> t.List[Identifier]: -2260 """Return the parts of a table in order catalog, db, table.""" -2261 return [ -2262 t.cast(Identifier, self.args[part]) -2263 for part in ("catalog", "db", "this") -2264 if self.args.get(part) -2265 ] -2266 -2267 -2268# See the TSQL "Querying data in a system-versioned temporal table" page -2269class SystemTime(Expression): -2270 arg_types = { -2271 "this": False, -2272 "expression": False, -2273 "kind": True, -2274 } -2275 -2276 -2277class Union(Subqueryable): -2278 arg_types = { -2279 "with": False, -2280 "this": True, -2281 "expression": True, -2282 "distinct": False, -2283 **QUERY_MODIFIERS, -2284 } -2285 -2286 def limit( -2287 self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts -2288 ) -> Select: -2289 """ -2290 Set the LIMIT expression. -2291 -2292 Example: -2293 >>> select("1").union(select("1")).limit(1).sql() -2294 'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1' -2295 -2296 Args: -2297 expression: the SQL code string to parse. -2298 This can also be an integer. -2299 If a `Limit` instance is passed, this is used as-is. -2300 If another `Expression` instance is passed, it will be wrapped in a `Limit`. -2301 dialect: the dialect used to parse the input expression. -2302 copy: if `False`, modify this expression instance in-place. -2303 opts: other options to use to parse the input expressions. -2304 -2305 Returns: -2306 The limited subqueryable. 
-2307 """ -2308 return ( -2309 select("*") -2310 .from_(self.subquery(alias="_l_0", copy=copy)) -2311 .limit(expression, dialect=dialect, copy=False, **opts) -2312 ) -2313 -2314 def select( -2315 self, -2316 *expressions: t.Optional[ExpOrStr], -2317 append: bool = True, -2318 dialect: DialectType = None, -2319 copy: bool = True, -2320 **opts, -2321 ) -> Union: -2322 """Append to or set the SELECT of the union recursively. -2323 -2324 Example: -2325 >>> from sqlglot import parse_one -2326 >>> parse_one("select a from x union select a from y union select a from z").select("b").sql() -2327 'SELECT a, b FROM x UNION SELECT a, b FROM y UNION SELECT a, b FROM z' -2328 -2329 Args: -2330 *expressions: the SQL code strings to parse. -2331 If an `Expression` instance is passed, it will be used as-is. -2332 append: if `True`, add to any existing expressions. -2333 Otherwise, this resets the expressions. -2334 dialect: the dialect used to parse the input expressions. -2335 copy: if `False`, modify this expression instance in-place. -2336 opts: other options to use to parse the input expressions. -2337 -2338 Returns: -2339 Union: the modified expression. -2340 """ -2341 this = self.copy() if copy else self -2342 this.this.unnest().select(*expressions, append=append, dialect=dialect, copy=False, **opts) -2343 this.expression.unnest().select( -2344 *expressions, append=append, dialect=dialect, copy=False, **opts -2345 ) -2346 return this -2347 -2348 @property -2349 def named_selects(self): -2350 return self.this.unnest().named_selects -2351 -2352 @property -2353 def is_star(self) -> bool: -2354 return self.this.is_star or self.expression.is_star -2355 -2356 @property -2357 def selects(self): -2358 return self.this.unnest().selects -2359 -2360 @property -2361 def left(self): -2362 return self.this -2363 -2364 @property -2365 def right(self): -2366 return self.expression -2367 -2368 -2369class Except(Union): -2370 pass -2371 -2372 -2373class Intersect(Union): -2374 pass -2375 -2376 -2377class Unnest(UDTF): -2378 arg_types = { -2379 "expressions": True, -2380 "ordinality": False, -2381 "alias": False, -2382 "offset": False, -2383 } -2384 -2385 -2386class Update(Expression): -2387 arg_types = { -2388 "with": False, -2389 "this": False, +2174 def ctes(self): +2175 with_ = self.args.get("with") +2176 if not with_: +2177 return [] +2178 return with_.expressions +2179 +2180 @property +2181 def selects(self): +2182 raise NotImplementedError("Subqueryable objects must implement `selects`") +2183 +2184 @property +2185 def named_selects(self): +2186 raise NotImplementedError("Subqueryable objects must implement `named_selects`") +2187 +2188 def with_( +2189 self, +2190 alias: ExpOrStr, +2191 as_: ExpOrStr, +2192 recursive: t.Optional[bool] = None, +2193 append: bool = True, +2194 dialect: DialectType = None, +2195 copy: bool = True, +2196 **opts, +2197 ) -> Subqueryable: +2198 """ +2199 Append to or set the common table expressions. +2200 +2201 Example: +2202 >>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql() +2203 'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2' +2204 +2205 Args: +2206 alias: the SQL code string to parse as the table name. +2207 If an `Expression` instance is passed, this is used as-is. +2208 as_: the SQL code string to parse as the table expression. +2209 If an `Expression` instance is passed, it will be used as-is. +2210 recursive: set the RECURSIVE part of the expression. Defaults to `False`. +2211 append: if `True`, add to any existing expressions. 
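A short sketch of Subqueryable.subquery and the ctes property shown above; the first part mirrors the subquery() doctest (with an explicit alias), the second assumes parse_one is available to build a query whose WITH clause can be read back through .ctes:

    from sqlglot import exp, parse_one

    inner = exp.Select().select("x").from_("tbl").subquery(alias="t")
    print(exp.Select().select("x").from_(inner).sql())
    # expected: 'SELECT x FROM (SELECT x FROM tbl) AS t'

    query = parse_one("WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2")
    print([cte.alias for cte in query.ctes])
    # expected: ['tbl2']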
+2212 Otherwise, this resets the expressions. +2213 dialect: the dialect used to parse the input expression. +2214 copy: if `False`, modify this expression instance in-place. +2215 opts: other options to use to parse the input expressions. +2216 +2217 Returns: +2218 The modified expression. +2219 """ +2220 return _apply_cte_builder( +2221 self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts +2222 ) +2223 +2224 +2225QUERY_MODIFIERS = { +2226 "match": False, +2227 "laterals": False, +2228 "joins": False, +2229 "pivots": False, +2230 "where": False, +2231 "group": False, +2232 "having": False, +2233 "qualify": False, +2234 "windows": False, +2235 "distribute": False, +2236 "sort": False, +2237 "cluster": False, +2238 "order": False, +2239 "limit": False, +2240 "offset": False, +2241 "locks": False, +2242 "sample": False, +2243 "settings": False, +2244 "format": False, +2245} +2246 +2247 +2248class Table(Expression): +2249 arg_types = { +2250 "this": True, +2251 "alias": False, +2252 "db": False, +2253 "catalog": False, +2254 "laterals": False, +2255 "joins": False, +2256 "pivots": False, +2257 "hints": False, +2258 "system_time": False, +2259 } +2260 +2261 @property +2262 def db(self) -> str: +2263 return self.text("db") +2264 +2265 @property +2266 def catalog(self) -> str: +2267 return self.text("catalog") +2268 +2269 @property +2270 def parts(self) -> t.List[Identifier]: +2271 """Return the parts of a table in order catalog, db, table.""" +2272 return [ +2273 t.cast(Identifier, self.args[part]) +2274 for part in ("catalog", "db", "this") +2275 if self.args.get(part) +2276 ] +2277 +2278 +2279# See the TSQL "Querying data in a system-versioned temporal table" page +2280class SystemTime(Expression): +2281 arg_types = { +2282 "this": False, +2283 "expression": False, +2284 "kind": True, +2285 } +2286 +2287 +2288class Union(Subqueryable): +2289 arg_types = { +2290 "with": False, +2291 "this": True, +2292 "expression": True, +2293 "distinct": False, +2294 **QUERY_MODIFIERS, +2295 } +2296 +2297 def limit( +2298 self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts +2299 ) -> Select: +2300 """ +2301 Set the LIMIT expression. +2302 +2303 Example: +2304 >>> select("1").union(select("1")).limit(1).sql() +2305 'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1' +2306 +2307 Args: +2308 expression: the SQL code string to parse. +2309 This can also be an integer. +2310 If a `Limit` instance is passed, this is used as-is. +2311 If another `Expression` instance is passed, it will be wrapped in a `Limit`. +2312 dialect: the dialect used to parse the input expression. +2313 copy: if `False`, modify this expression instance in-place. +2314 opts: other options to use to parse the input expressions. +2315 +2316 Returns: +2317 The limited subqueryable. +2318 """ +2319 return ( +2320 select("*") +2321 .from_(self.subquery(alias="_l_0", copy=copy)) +2322 .limit(expression, dialect=dialect, copy=False, **opts) +2323 ) +2324 +2325 def select( +2326 self, +2327 *expressions: t.Optional[ExpOrStr], +2328 append: bool = True, +2329 dialect: DialectType = None, +2330 copy: bool = True, +2331 **opts, +2332 ) -> Union: +2333 """Append to or set the SELECT of the union recursively. 
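The with_ builder and Union.limit defined above compose as in their doctests; a minimal sketch (the outputs are the documented ones):

    from sqlglot import exp, select

    print(exp.Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql())
    # 'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'

    print(select("1").union(select("1")).limit(1).sql())
    # 'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'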
+2334 +2335 Example: +2336 >>> from sqlglot import parse_one +2337 >>> parse_one("select a from x union select a from y union select a from z").select("b").sql() +2338 'SELECT a, b FROM x UNION SELECT a, b FROM y UNION SELECT a, b FROM z' +2339 +2340 Args: +2341 *expressions: the SQL code strings to parse. +2342 If an `Expression` instance is passed, it will be used as-is. +2343 append: if `True`, add to any existing expressions. +2344 Otherwise, this resets the expressions. +2345 dialect: the dialect used to parse the input expressions. +2346 copy: if `False`, modify this expression instance in-place. +2347 opts: other options to use to parse the input expressions. +2348 +2349 Returns: +2350 Union: the modified expression. +2351 """ +2352 this = self.copy() if copy else self +2353 this.this.unnest().select(*expressions, append=append, dialect=dialect, copy=False, **opts) +2354 this.expression.unnest().select( +2355 *expressions, append=append, dialect=dialect, copy=False, **opts +2356 ) +2357 return this +2358 +2359 @property +2360 def named_selects(self): +2361 return self.this.unnest().named_selects +2362 +2363 @property +2364 def is_star(self) -> bool: +2365 return self.this.is_star or self.expression.is_star +2366 +2367 @property +2368 def selects(self): +2369 return self.this.unnest().selects +2370 +2371 @property +2372 def left(self): +2373 return self.this +2374 +2375 @property +2376 def right(self): +2377 return self.expression +2378 +2379 +2380class Except(Union): +2381 pass +2382 +2383 +2384class Intersect(Union): +2385 pass +2386 +2387 +2388class Unnest(UDTF): +2389 arg_types = { 2390 "expressions": True, -2391 "from": False, -2392 "where": False, -2393 "returning": False, +2391 "ordinality": False, +2392 "alias": False, +2393 "offset": False, 2394 } 2395 2396 -2397class Values(UDTF): +2397class Update(Expression): 2398 arg_types = { -2399 "expressions": True, -2400 "ordinality": False, -2401 "alias": False, -2402 } -2403 -2404 -2405class Var(Expression): -2406 pass +2399 "with": False, +2400 "this": False, +2401 "expressions": True, +2402 "from": False, +2403 "where": False, +2404 "returning": False, +2405 } +2406 2407 -2408 -2409class Schema(Expression): -2410 arg_types = {"this": False, "expressions": False} -2411 -2412 -2413# https://dev.mysql.com/doc/refman/8.0/en/select.html -2414# https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/SELECT.html -2415class Lock(Expression): -2416 arg_types = {"update": True, "expressions": False, "wait": False} -2417 +2408class Values(UDTF): +2409 arg_types = { +2410 "expressions": True, +2411 "ordinality": False, +2412 "alias": False, +2413 } +2414 +2415 +2416class Var(Expression): +2417 pass 2418 -2419class Select(Subqueryable): -2420 arg_types = { -2421 "with": False, -2422 "kind": False, -2423 "expressions": False, -2424 "hint": False, -2425 "distinct": False, -2426 "struct": False, # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#return_query_results_as_a_value_table -2427 "value": False, -2428 "into": False, -2429 "from": False, -2430 **QUERY_MODIFIERS, -2431 } -2432 -2433 def from_( -2434 self, expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts -2435 ) -> Select: -2436 """ -2437 Set the FROM expression. -2438 -2439 Example: -2440 >>> Select().from_("tbl").select("x").sql() -2441 'SELECT x FROM tbl' -2442 -2443 Args: -2444 expression : the SQL code strings to parse. -2445 If a `From` instance is passed, this is used as-is. 
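Union.select above pushes the new projection into every side of the union; a hedged two-branch variant of its doctest:

    from sqlglot import parse_one

    union = parse_one("select a from x union select a from y")
    print(union.select("b").sql())
    # expected: 'SELECT a, b FROM x UNION SELECT a, b FROM y'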
-2446 If another `Expression` instance is passed, it will be wrapped in a `From`. -2447 dialect: the dialect used to parse the input expression. -2448 copy: if `False`, modify this expression instance in-place. -2449 opts: other options to use to parse the input expressions. -2450 -2451 Returns: -2452 The modified Select expression. -2453 """ -2454 return _apply_builder( -2455 expression=expression, -2456 instance=self, -2457 arg="from", -2458 into=From, -2459 prefix="FROM", -2460 dialect=dialect, -2461 copy=copy, -2462 **opts, -2463 ) -2464 -2465 def group_by( -2466 self, -2467 *expressions: t.Optional[ExpOrStr], -2468 append: bool = True, -2469 dialect: DialectType = None, -2470 copy: bool = True, -2471 **opts, -2472 ) -> Select: -2473 """ -2474 Set the GROUP BY expression. +2419 +2420class Schema(Expression): +2421 arg_types = {"this": False, "expressions": False} +2422 +2423 +2424# https://dev.mysql.com/doc/refman/8.0/en/select.html +2425# https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/SELECT.html +2426class Lock(Expression): +2427 arg_types = {"update": True, "expressions": False, "wait": False} +2428 +2429 +2430class Select(Subqueryable): +2431 arg_types = { +2432 "with": False, +2433 "kind": False, +2434 "expressions": False, +2435 "hint": False, +2436 "distinct": False, +2437 "struct": False, # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#return_query_results_as_a_value_table +2438 "value": False, +2439 "into": False, +2440 "from": False, +2441 **QUERY_MODIFIERS, +2442 } +2443 +2444 def from_( +2445 self, expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts +2446 ) -> Select: +2447 """ +2448 Set the FROM expression. +2449 +2450 Example: +2451 >>> Select().from_("tbl").select("x").sql() +2452 'SELECT x FROM tbl' +2453 +2454 Args: +2455 expression : the SQL code strings to parse. +2456 If a `From` instance is passed, this is used as-is. +2457 If another `Expression` instance is passed, it will be wrapped in a `From`. +2458 dialect: the dialect used to parse the input expression. +2459 copy: if `False`, modify this expression instance in-place. +2460 opts: other options to use to parse the input expressions. +2461 +2462 Returns: +2463 The modified Select expression. +2464 """ +2465 return _apply_builder( +2466 expression=expression, +2467 instance=self, +2468 arg="from", +2469 into=From, +2470 prefix="FROM", +2471 dialect=dialect, +2472 copy=copy, +2473 **opts, +2474 ) 2475 -2476 Example: -2477 >>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql() -2478 'SELECT x, COUNT(1) FROM tbl GROUP BY x' -2479 -2480 Args: -2481 *expressions: the SQL code strings to parse. -2482 If a `Group` instance is passed, this is used as-is. -2483 If another `Expression` instance is passed, it will be wrapped in a `Group`. -2484 If nothing is passed in then a group by is not applied to the expression -2485 append: if `True`, add to any existing expressions. -2486 Otherwise, this flattens all the `Group` expression into a single expression. -2487 dialect: the dialect used to parse the input expression. -2488 copy: if `False`, modify this expression instance in-place. -2489 opts: other options to use to parse the input expressions. +2476 def group_by( +2477 self, +2478 *expressions: t.Optional[ExpOrStr], +2479 append: bool = True, +2480 dialect: DialectType = None, +2481 copy: bool = True, +2482 **opts, +2483 ) -> Select: +2484 """ +2485 Set the GROUP BY expression. 
+2486 +2487 Example: +2488 >>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql() +2489 'SELECT x, COUNT(1) FROM tbl GROUP BY x' 2490 -2491 Returns: -2492 The modified Select expression. -2493 """ -2494 if not expressions: -2495 return self if not copy else self.copy() -2496 -2497 return _apply_child_list_builder( -2498 *expressions, -2499 instance=self, -2500 arg="group", -2501 append=append, -2502 copy=copy, -2503 prefix="GROUP BY", -2504 into=Group, -2505 dialect=dialect, -2506 **opts, -2507 ) -2508 -2509 def order_by( -2510 self, -2511 *expressions: t.Optional[ExpOrStr], -2512 append: bool = True, -2513 dialect: DialectType = None, -2514 copy: bool = True, -2515 **opts, -2516 ) -> Select: -2517 """ -2518 Set the ORDER BY expression. +2491 Args: +2492 *expressions: the SQL code strings to parse. +2493 If a `Group` instance is passed, this is used as-is. +2494 If another `Expression` instance is passed, it will be wrapped in a `Group`. +2495 If nothing is passed in then a group by is not applied to the expression +2496 append: if `True`, add to any existing expressions. +2497 Otherwise, this flattens all the `Group` expression into a single expression. +2498 dialect: the dialect used to parse the input expression. +2499 copy: if `False`, modify this expression instance in-place. +2500 opts: other options to use to parse the input expressions. +2501 +2502 Returns: +2503 The modified Select expression. +2504 """ +2505 if not expressions: +2506 return self if not copy else self.copy() +2507 +2508 return _apply_child_list_builder( +2509 *expressions, +2510 instance=self, +2511 arg="group", +2512 append=append, +2513 copy=copy, +2514 prefix="GROUP BY", +2515 into=Group, +2516 dialect=dialect, +2517 **opts, +2518 ) 2519 -2520 Example: -2521 >>> Select().from_("tbl").select("x").order_by("x DESC").sql() -2522 'SELECT x FROM tbl ORDER BY x DESC' -2523 -2524 Args: -2525 *expressions: the SQL code strings to parse. -2526 If a `Group` instance is passed, this is used as-is. -2527 If another `Expression` instance is passed, it will be wrapped in a `Order`. -2528 append: if `True`, add to any existing expressions. -2529 Otherwise, this flattens all the `Order` expression into a single expression. -2530 dialect: the dialect used to parse the input expression. -2531 copy: if `False`, modify this expression instance in-place. -2532 opts: other options to use to parse the input expressions. -2533 -2534 Returns: -2535 The modified Select expression. -2536 """ -2537 return _apply_child_list_builder( -2538 *expressions, -2539 instance=self, -2540 arg="order", -2541 append=append, -2542 copy=copy, -2543 prefix="ORDER BY", -2544 into=Order, -2545 dialect=dialect, -2546 **opts, -2547 ) -2548 -2549 def sort_by( -2550 self, -2551 *expressions: t.Optional[ExpOrStr], -2552 append: bool = True, -2553 dialect: DialectType = None, -2554 copy: bool = True, -2555 **opts, -2556 ) -> Select: -2557 """ -2558 Set the SORT BY expression. +2520 def order_by( +2521 self, +2522 *expressions: t.Optional[ExpOrStr], +2523 append: bool = True, +2524 dialect: DialectType = None, +2525 copy: bool = True, +2526 **opts, +2527 ) -> Select: +2528 """ +2529 Set the ORDER BY expression. +2530 +2531 Example: +2532 >>> Select().from_("tbl").select("x").order_by("x DESC").sql() +2533 'SELECT x FROM tbl ORDER BY x DESC' +2534 +2535 Args: +2536 *expressions: the SQL code strings to parse. +2537 If a `Group` instance is passed, this is used as-is. +2538 If another `Expression` instance is passed, it will be wrapped in a `Order`. 
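A sketch of the Select builders covered above (from_, select, group_by) plus a chained order_by; the first output is the group_by doctest, the second is the expected combined result, since the generator emits clauses in SQL order rather than call order:

    from sqlglot import exp

    q = exp.Select().from_("tbl").select("x", "COUNT(1)").group_by("x")
    print(q.sql())
    # 'SELECT x, COUNT(1) FROM tbl GROUP BY x'
    print(q.order_by("x DESC").sql())
    # expected: 'SELECT x, COUNT(1) FROM tbl GROUP BY x ORDER BY x DESC'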
+2539 append: if `True`, add to any existing expressions. +2540 Otherwise, this flattens all the `Order` expression into a single expression. +2541 dialect: the dialect used to parse the input expression. +2542 copy: if `False`, modify this expression instance in-place. +2543 opts: other options to use to parse the input expressions. +2544 +2545 Returns: +2546 The modified Select expression. +2547 """ +2548 return _apply_child_list_builder( +2549 *expressions, +2550 instance=self, +2551 arg="order", +2552 append=append, +2553 copy=copy, +2554 prefix="ORDER BY", +2555 into=Order, +2556 dialect=dialect, +2557 **opts, +2558 ) 2559 -2560 Example: -2561 >>> Select().from_("tbl").select("x").sort_by("x DESC").sql(dialect="hive") -2562 'SELECT x FROM tbl SORT BY x DESC' -2563 -2564 Args: -2565 *expressions: the SQL code strings to parse. -2566 If a `Group` instance is passed, this is used as-is. -2567 If another `Expression` instance is passed, it will be wrapped in a `SORT`. -2568 append: if `True`, add to any existing expressions. -2569 Otherwise, this flattens all the `Order` expression into a single expression. -2570 dialect: the dialect used to parse the input expression. -2571 copy: if `False`, modify this expression instance in-place. -2572 opts: other options to use to parse the input expressions. -2573 -2574 Returns: -2575 The modified Select expression. -2576 """ -2577 return _apply_child_list_builder( -2578 *expressions, -2579 instance=self, -2580 arg="sort", -2581 append=append, -2582 copy=copy, -2583 prefix="SORT BY", -2584 into=Sort, -2585 dialect=dialect, -2586 **opts, -2587 ) -2588 -2589 def cluster_by( -2590 self, -2591 *expressions: t.Optional[ExpOrStr], -2592 append: bool = True, -2593 dialect: DialectType = None, -2594 copy: bool = True, -2595 **opts, -2596 ) -> Select: -2597 """ -2598 Set the CLUSTER BY expression. +2560 def sort_by( +2561 self, +2562 *expressions: t.Optional[ExpOrStr], +2563 append: bool = True, +2564 dialect: DialectType = None, +2565 copy: bool = True, +2566 **opts, +2567 ) -> Select: +2568 """ +2569 Set the SORT BY expression. +2570 +2571 Example: +2572 >>> Select().from_("tbl").select("x").sort_by("x DESC").sql(dialect="hive") +2573 'SELECT x FROM tbl SORT BY x DESC' +2574 +2575 Args: +2576 *expressions: the SQL code strings to parse. +2577 If a `Group` instance is passed, this is used as-is. +2578 If another `Expression` instance is passed, it will be wrapped in a `SORT`. +2579 append: if `True`, add to any existing expressions. +2580 Otherwise, this flattens all the `Order` expression into a single expression. +2581 dialect: the dialect used to parse the input expression. +2582 copy: if `False`, modify this expression instance in-place. +2583 opts: other options to use to parse the input expressions. +2584 +2585 Returns: +2586 The modified Select expression. +2587 """ +2588 return _apply_child_list_builder( +2589 *expressions, +2590 instance=self, +2591 arg="sort", +2592 append=append, +2593 copy=copy, +2594 prefix="SORT BY", +2595 into=Sort, +2596 dialect=dialect, +2597 **opts, +2598 ) 2599 -2600 Example: -2601 >>> Select().from_("tbl").select("x").cluster_by("x DESC").sql(dialect="hive") -2602 'SELECT x FROM tbl CLUSTER BY x DESC' -2603 -2604 Args: -2605 *expressions: the SQL code strings to parse. -2606 If a `Group` instance is passed, this is used as-is. -2607 If another `Expression` instance is passed, it will be wrapped in a `Cluster`. -2608 append: if `True`, add to any existing expressions. 
-2609 Otherwise, this flattens all the `Order` expression into a single expression. -2610 dialect: the dialect used to parse the input expression. -2611 copy: if `False`, modify this expression instance in-place. -2612 opts: other options to use to parse the input expressions. -2613 -2614 Returns: -2615 The modified Select expression. -2616 """ -2617 return _apply_child_list_builder( -2618 *expressions, -2619 instance=self, -2620 arg="cluster", -2621 append=append, -2622 copy=copy, -2623 prefix="CLUSTER BY", -2624 into=Cluster, -2625 dialect=dialect, -2626 **opts, -2627 ) -2628 -2629 def limit( -2630 self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts -2631 ) -> Select: -2632 """ -2633 Set the LIMIT expression. -2634 -2635 Example: -2636 >>> Select().from_("tbl").select("x").limit(10).sql() -2637 'SELECT x FROM tbl LIMIT 10' -2638 -2639 Args: -2640 expression: the SQL code string to parse. -2641 This can also be an integer. -2642 If a `Limit` instance is passed, this is used as-is. -2643 If another `Expression` instance is passed, it will be wrapped in a `Limit`. -2644 dialect: the dialect used to parse the input expression. -2645 copy: if `False`, modify this expression instance in-place. -2646 opts: other options to use to parse the input expressions. -2647 -2648 Returns: -2649 Select: the modified expression. -2650 """ -2651 return _apply_builder( -2652 expression=expression, -2653 instance=self, -2654 arg="limit", -2655 into=Limit, -2656 prefix="LIMIT", -2657 dialect=dialect, -2658 copy=copy, -2659 **opts, -2660 ) -2661 -2662 def offset( -2663 self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts -2664 ) -> Select: -2665 """ -2666 Set the OFFSET expression. -2667 -2668 Example: -2669 >>> Select().from_("tbl").select("x").offset(10).sql() -2670 'SELECT x FROM tbl OFFSET 10' -2671 -2672 Args: -2673 expression: the SQL code string to parse. -2674 This can also be an integer. -2675 If a `Offset` instance is passed, this is used as-is. -2676 If another `Expression` instance is passed, it will be wrapped in a `Offset`. -2677 dialect: the dialect used to parse the input expression. -2678 copy: if `False`, modify this expression instance in-place. -2679 opts: other options to use to parse the input expressions. -2680 -2681 Returns: -2682 The modified Select expression. -2683 """ -2684 return _apply_builder( -2685 expression=expression, -2686 instance=self, -2687 arg="offset", -2688 into=Offset, -2689 prefix="OFFSET", -2690 dialect=dialect, -2691 copy=copy, -2692 **opts, -2693 ) -2694 -2695 def select( -2696 self, -2697 *expressions: t.Optional[ExpOrStr], -2698 append: bool = True, -2699 dialect: DialectType = None, -2700 copy: bool = True, -2701 **opts, -2702 ) -> Select: -2703 """ -2704 Append to or set the SELECT expressions. +2600 def cluster_by( +2601 self, +2602 *expressions: t.Optional[ExpOrStr], +2603 append: bool = True, +2604 dialect: DialectType = None, +2605 copy: bool = True, +2606 **opts, +2607 ) -> Select: +2608 """ +2609 Set the CLUSTER BY expression. +2610 +2611 Example: +2612 >>> Select().from_("tbl").select("x").cluster_by("x DESC").sql(dialect="hive") +2613 'SELECT x FROM tbl CLUSTER BY x DESC' +2614 +2615 Args: +2616 *expressions: the SQL code strings to parse. +2617 If a `Group` instance is passed, this is used as-is. +2618 If another `Expression` instance is passed, it will be wrapped in a `Cluster`. +2619 append: if `True`, add to any existing expressions. 
+2620 Otherwise, this flattens all the `Order` expression into a single expression. +2621 dialect: the dialect used to parse the input expression. +2622 copy: if `False`, modify this expression instance in-place. +2623 opts: other options to use to parse the input expressions. +2624 +2625 Returns: +2626 The modified Select expression. +2627 """ +2628 return _apply_child_list_builder( +2629 *expressions, +2630 instance=self, +2631 arg="cluster", +2632 append=append, +2633 copy=copy, +2634 prefix="CLUSTER BY", +2635 into=Cluster, +2636 dialect=dialect, +2637 **opts, +2638 ) +2639 +2640 def limit( +2641 self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts +2642 ) -> Select: +2643 """ +2644 Set the LIMIT expression. +2645 +2646 Example: +2647 >>> Select().from_("tbl").select("x").limit(10).sql() +2648 'SELECT x FROM tbl LIMIT 10' +2649 +2650 Args: +2651 expression: the SQL code string to parse. +2652 This can also be an integer. +2653 If a `Limit` instance is passed, this is used as-is. +2654 If another `Expression` instance is passed, it will be wrapped in a `Limit`. +2655 dialect: the dialect used to parse the input expression. +2656 copy: if `False`, modify this expression instance in-place. +2657 opts: other options to use to parse the input expressions. +2658 +2659 Returns: +2660 Select: the modified expression. +2661 """ +2662 return _apply_builder( +2663 expression=expression, +2664 instance=self, +2665 arg="limit", +2666 into=Limit, +2667 prefix="LIMIT", +2668 dialect=dialect, +2669 copy=copy, +2670 **opts, +2671 ) +2672 +2673 def offset( +2674 self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts +2675 ) -> Select: +2676 """ +2677 Set the OFFSET expression. +2678 +2679 Example: +2680 >>> Select().from_("tbl").select("x").offset(10).sql() +2681 'SELECT x FROM tbl OFFSET 10' +2682 +2683 Args: +2684 expression: the SQL code string to parse. +2685 This can also be an integer. +2686 If a `Offset` instance is passed, this is used as-is. +2687 If another `Expression` instance is passed, it will be wrapped in a `Offset`. +2688 dialect: the dialect used to parse the input expression. +2689 copy: if `False`, modify this expression instance in-place. +2690 opts: other options to use to parse the input expressions. +2691 +2692 Returns: +2693 The modified Select expression. +2694 """ +2695 return _apply_builder( +2696 expression=expression, +2697 instance=self, +2698 arg="offset", +2699 into=Offset, +2700 prefix="OFFSET", +2701 dialect=dialect, +2702 copy=copy, +2703 **opts, +2704 ) 2705 -2706 Example: -2707 >>> Select().select("x", "y").sql() -2708 'SELECT x, y' -2709 -2710 Args: -2711 *expressions: the SQL code strings to parse. -2712 If an `Expression` instance is passed, it will be used as-is. -2713 append: if `True`, add to any existing expressions. -2714 Otherwise, this resets the expressions. -2715 dialect: the dialect used to parse the input expressions. -2716 copy: if `False`, modify this expression instance in-place. -2717 opts: other options to use to parse the input expressions. -2718 -2719 Returns: -2720 The modified Select expression. 
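The Hive-style sort_by/cluster_by builders and limit/offset above, each mirroring its doctest (a sketch, assuming sqlglot is installed):

    from sqlglot import exp

    print(exp.Select().from_("tbl").select("x").sort_by("x DESC").sql(dialect="hive"))
    # 'SELECT x FROM tbl SORT BY x DESC'
    print(exp.Select().from_("tbl").select("x").cluster_by("x DESC").sql(dialect="hive"))
    # 'SELECT x FROM tbl CLUSTER BY x DESC'
    print(exp.Select().from_("tbl").select("x").limit(10).sql())
    # 'SELECT x FROM tbl LIMIT 10'
    print(exp.Select().from_("tbl").select("x").offset(10).sql())
    # 'SELECT x FROM tbl OFFSET 10'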
-2721 """ -2722 return _apply_list_builder( -2723 *expressions, -2724 instance=self, -2725 arg="expressions", -2726 append=append, -2727 dialect=dialect, -2728 copy=copy, -2729 **opts, -2730 ) -2731 -2732 def lateral( -2733 self, -2734 *expressions: t.Optional[ExpOrStr], -2735 append: bool = True, -2736 dialect: DialectType = None, -2737 copy: bool = True, -2738 **opts, -2739 ) -> Select: -2740 """ -2741 Append to or set the LATERAL expressions. +2706 def select( +2707 self, +2708 *expressions: t.Optional[ExpOrStr], +2709 append: bool = True, +2710 dialect: DialectType = None, +2711 copy: bool = True, +2712 **opts, +2713 ) -> Select: +2714 """ +2715 Append to or set the SELECT expressions. +2716 +2717 Example: +2718 >>> Select().select("x", "y").sql() +2719 'SELECT x, y' +2720 +2721 Args: +2722 *expressions: the SQL code strings to parse. +2723 If an `Expression` instance is passed, it will be used as-is. +2724 append: if `True`, add to any existing expressions. +2725 Otherwise, this resets the expressions. +2726 dialect: the dialect used to parse the input expressions. +2727 copy: if `False`, modify this expression instance in-place. +2728 opts: other options to use to parse the input expressions. +2729 +2730 Returns: +2731 The modified Select expression. +2732 """ +2733 return _apply_list_builder( +2734 *expressions, +2735 instance=self, +2736 arg="expressions", +2737 append=append, +2738 dialect=dialect, +2739 copy=copy, +2740 **opts, +2741 ) 2742 -2743 Example: -2744 >>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql() -2745 'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z' -2746 -2747 Args: -2748 *expressions: the SQL code strings to parse. -2749 If an `Expression` instance is passed, it will be used as-is. -2750 append: if `True`, add to any existing expressions. -2751 Otherwise, this resets the expressions. -2752 dialect: the dialect used to parse the input expressions. -2753 copy: if `False`, modify this expression instance in-place. -2754 opts: other options to use to parse the input expressions. -2755 -2756 Returns: -2757 The modified Select expression. -2758 """ -2759 return _apply_list_builder( -2760 *expressions, -2761 instance=self, -2762 arg="laterals", -2763 append=append, -2764 into=Lateral, -2765 prefix="LATERAL VIEW", -2766 dialect=dialect, -2767 copy=copy, -2768 **opts, -2769 ) -2770 -2771 def join( -2772 self, -2773 expression: ExpOrStr, -2774 on: t.Optional[ExpOrStr] = None, -2775 using: t.Optional[ExpOrStr | t.List[ExpOrStr]] = None, -2776 append: bool = True, -2777 join_type: t.Optional[str] = None, -2778 join_alias: t.Optional[Identifier | str] = None, -2779 dialect: DialectType = None, -2780 copy: bool = True, -2781 **opts, -2782 ) -> Select: -2783 """ -2784 Append to or set the JOIN expressions. -2785 -2786 Example: -2787 >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql() -2788 'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y' -2789 -2790 >>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql() -2791 'SELECT 1 FROM a JOIN b USING (x, y, z)' -2792 -2793 Use `join_type` to change the type of join: -2794 -2795 >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql() -2796 'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y' -2797 -2798 Args: -2799 expression: the SQL code string to parse. -2800 If an `Expression` instance is passed, it will be used as-is. -2801 on: optionally specify the join "on" criteria as a SQL string. 
-2802 If an `Expression` instance is passed, it will be used as-is. -2803 using: optionally specify the join "using" criteria as a SQL string. -2804 If an `Expression` instance is passed, it will be used as-is. -2805 append: if `True`, add to any existing expressions. -2806 Otherwise, this resets the expressions. -2807 join_type: if set, alter the parsed join type. -2808 join_alias: an optional alias for the joined source. -2809 dialect: the dialect used to parse the input expressions. -2810 copy: if `False`, modify this expression instance in-place. -2811 opts: other options to use to parse the input expressions. -2812 -2813 Returns: -2814 Select: the modified expression. -2815 """ -2816 parse_args: t.Dict[str, t.Any] = {"dialect": dialect, **opts} -2817 -2818 try: -2819 expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args) -2820 except ParseError: -2821 expression = maybe_parse(expression, into=(Join, Expression), **parse_args) -2822 -2823 join = expression if isinstance(expression, Join) else Join(this=expression) -2824 -2825 if isinstance(join.this, Select): -2826 join.this.replace(join.this.subquery()) -2827 -2828 if join_type: -2829 method: t.Optional[Token] -2830 side: t.Optional[Token] -2831 kind: t.Optional[Token] -2832 -2833 method, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args) # type: ignore -2834 -2835 if method: -2836 join.set("method", method.text) -2837 if side: -2838 join.set("side", side.text) -2839 if kind: -2840 join.set("kind", kind.text) -2841 -2842 if on: -2843 on = and_(*ensure_list(on), dialect=dialect, copy=copy, **opts) -2844 join.set("on", on) +2743 def lateral( +2744 self, +2745 *expressions: t.Optional[ExpOrStr], +2746 append: bool = True, +2747 dialect: DialectType = None, +2748 copy: bool = True, +2749 **opts, +2750 ) -> Select: +2751 """ +2752 Append to or set the LATERAL expressions. +2753 +2754 Example: +2755 >>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql() +2756 'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z' +2757 +2758 Args: +2759 *expressions: the SQL code strings to parse. +2760 If an `Expression` instance is passed, it will be used as-is. +2761 append: if `True`, add to any existing expressions. +2762 Otherwise, this resets the expressions. +2763 dialect: the dialect used to parse the input expressions. +2764 copy: if `False`, modify this expression instance in-place. +2765 opts: other options to use to parse the input expressions. +2766 +2767 Returns: +2768 The modified Select expression. +2769 """ +2770 return _apply_list_builder( +2771 *expressions, +2772 instance=self, +2773 arg="laterals", +2774 append=append, +2775 into=Lateral, +2776 prefix="LATERAL VIEW", +2777 dialect=dialect, +2778 copy=copy, +2779 **opts, +2780 ) +2781 +2782 def join( +2783 self, +2784 expression: ExpOrStr, +2785 on: t.Optional[ExpOrStr] = None, +2786 using: t.Optional[ExpOrStr | t.List[ExpOrStr]] = None, +2787 append: bool = True, +2788 join_type: t.Optional[str] = None, +2789 join_alias: t.Optional[Identifier | str] = None, +2790 dialect: DialectType = None, +2791 copy: bool = True, +2792 **opts, +2793 ) -> Select: +2794 """ +2795 Append to or set the JOIN expressions. 
+2796 +2797 Example: +2798 >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql() +2799 'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y' +2800 +2801 >>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql() +2802 'SELECT 1 FROM a JOIN b USING (x, y, z)' +2803 +2804 Use `join_type` to change the type of join: +2805 +2806 >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql() +2807 'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y' +2808 +2809 Args: +2810 expression: the SQL code string to parse. +2811 If an `Expression` instance is passed, it will be used as-is. +2812 on: optionally specify the join "on" criteria as a SQL string. +2813 If an `Expression` instance is passed, it will be used as-is. +2814 using: optionally specify the join "using" criteria as a SQL string. +2815 If an `Expression` instance is passed, it will be used as-is. +2816 append: if `True`, add to any existing expressions. +2817 Otherwise, this resets the expressions. +2818 join_type: if set, alter the parsed join type. +2819 join_alias: an optional alias for the joined source. +2820 dialect: the dialect used to parse the input expressions. +2821 copy: if `False`, modify this expression instance in-place. +2822 opts: other options to use to parse the input expressions. +2823 +2824 Returns: +2825 Select: the modified expression. +2826 """ +2827 parse_args: t.Dict[str, t.Any] = {"dialect": dialect, **opts} +2828 +2829 try: +2830 expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args) +2831 except ParseError: +2832 expression = maybe_parse(expression, into=(Join, Expression), **parse_args) +2833 +2834 join = expression if isinstance(expression, Join) else Join(this=expression) +2835 +2836 if isinstance(join.this, Select): +2837 join.this.replace(join.this.subquery()) +2838 +2839 if join_type: +2840 method: t.Optional[Token] +2841 side: t.Optional[Token] +2842 kind: t.Optional[Token] +2843 +2844 method, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args) # type: ignore 2845 -2846 if using: -2847 join = _apply_list_builder( -2848 *ensure_list(using), -2849 instance=join, -2850 arg="using", -2851 append=append, -2852 copy=copy, -2853 **opts, -2854 ) -2855 -2856 if join_alias: -2857 join.set("this", alias_(join.this, join_alias, table=True)) -2858 -2859 return _apply_list_builder( -2860 join, -2861 instance=self, -2862 arg="joins", -2863 append=append, -2864 copy=copy, -2865 **opts, -2866 ) -2867 -2868 def where( -2869 self, -2870 *expressions: t.Optional[ExpOrStr], -2871 append: bool = True, -2872 dialect: DialectType = None, -2873 copy: bool = True, -2874 **opts, -2875 ) -> Select: -2876 """ -2877 Append to or set the WHERE expressions. 
+2846 if method: +2847 join.set("method", method.text) +2848 if side: +2849 join.set("side", side.text) +2850 if kind: +2851 join.set("kind", kind.text) +2852 +2853 if on: +2854 on = and_(*ensure_list(on), dialect=dialect, copy=copy, **opts) +2855 join.set("on", on) +2856 +2857 if using: +2858 join = _apply_list_builder( +2859 *ensure_list(using), +2860 instance=join, +2861 arg="using", +2862 append=append, +2863 copy=copy, +2864 **opts, +2865 ) +2866 +2867 if join_alias: +2868 join.set("this", alias_(join.this, join_alias, table=True)) +2869 +2870 return _apply_list_builder( +2871 join, +2872 instance=self, +2873 arg="joins", +2874 append=append, +2875 copy=copy, +2876 **opts, +2877 ) 2878 -2879 Example: -2880 >>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql() -2881 "SELECT x FROM tbl WHERE x = 'a' OR x < 'b'" -2882 -2883 Args: -2884 *expressions: the SQL code strings to parse. -2885 If an `Expression` instance is passed, it will be used as-is. -2886 Multiple expressions are combined with an AND operator. -2887 append: if `True`, AND the new expressions to any existing expression. -2888 Otherwise, this resets the expression. -2889 dialect: the dialect used to parse the input expressions. -2890 copy: if `False`, modify this expression instance in-place. -2891 opts: other options to use to parse the input expressions. -2892 -2893 Returns: -2894 Select: the modified expression. -2895 """ -2896 return _apply_conjunction_builder( -2897 *expressions, -2898 instance=self, -2899 arg="where", -2900 append=append, -2901 into=Where, -2902 dialect=dialect, -2903 copy=copy, -2904 **opts, -2905 ) -2906 -2907 def having( -2908 self, -2909 *expressions: t.Optional[ExpOrStr], -2910 append: bool = True, -2911 dialect: DialectType = None, -2912 copy: bool = True, -2913 **opts, -2914 ) -> Select: -2915 """ -2916 Append to or set the HAVING expressions. +2879 def where( +2880 self, +2881 *expressions: t.Optional[ExpOrStr], +2882 append: bool = True, +2883 dialect: DialectType = None, +2884 copy: bool = True, +2885 **opts, +2886 ) -> Select: +2887 """ +2888 Append to or set the WHERE expressions. +2889 +2890 Example: +2891 >>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql() +2892 "SELECT x FROM tbl WHERE x = 'a' OR x < 'b'" +2893 +2894 Args: +2895 *expressions: the SQL code strings to parse. +2896 If an `Expression` instance is passed, it will be used as-is. +2897 Multiple expressions are combined with an AND operator. +2898 append: if `True`, AND the new expressions to any existing expression. +2899 Otherwise, this resets the expression. +2900 dialect: the dialect used to parse the input expressions. +2901 copy: if `False`, modify this expression instance in-place. +2902 opts: other options to use to parse the input expressions. +2903 +2904 Returns: +2905 Select: the modified expression. +2906 """ +2907 return _apply_conjunction_builder( +2908 *expressions, +2909 instance=self, +2910 arg="where", +2911 append=append, +2912 into=Where, +2913 dialect=dialect, +2914 copy=copy, +2915 **opts, +2916 ) 2917 -2918 Example: -2919 >>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql() -2920 'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3' -2921 -2922 Args: -2923 *expressions: the SQL code strings to parse. -2924 If an `Expression` instance is passed, it will be used as-is. -2925 Multiple expressions are combined with an AND operator. -2926 append: if `True`, AND the new expressions to any existing expression. 
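join() and where() from above combine as in their doctests; a sketch where join_type exercises the method/side/kind handling shown in the body:

    from sqlglot import exp

    q = (
        exp.Select()
        .select("*")
        .from_("tbl")
        .join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer")
        .where("x = 'a' OR x < 'b'")
    )
    print(q.sql())
    # expected: "SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y WHERE x = 'a' OR x < 'b'"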
-2927 Otherwise, this resets the expression. -2928 dialect: the dialect used to parse the input expressions. -2929 copy: if `False`, modify this expression instance in-place. -2930 opts: other options to use to parse the input expressions. -2931 -2932 Returns: -2933 The modified Select expression. -2934 """ -2935 return _apply_conjunction_builder( -2936 *expressions, -2937 instance=self, -2938 arg="having", -2939 append=append, -2940 into=Having, -2941 dialect=dialect, -2942 copy=copy, -2943 **opts, -2944 ) -2945 -2946 def window( -2947 self, -2948 *expressions: t.Optional[ExpOrStr], -2949 append: bool = True, -2950 dialect: DialectType = None, -2951 copy: bool = True, -2952 **opts, -2953 ) -> Select: -2954 return _apply_list_builder( -2955 *expressions, -2956 instance=self, -2957 arg="windows", -2958 append=append, -2959 into=Window, -2960 dialect=dialect, -2961 copy=copy, -2962 **opts, -2963 ) -2964 -2965 def qualify( -2966 self, -2967 *expressions: t.Optional[ExpOrStr], -2968 append: bool = True, -2969 dialect: DialectType = None, -2970 copy: bool = True, -2971 **opts, -2972 ) -> Select: -2973 return _apply_conjunction_builder( -2974 *expressions, -2975 instance=self, -2976 arg="qualify", -2977 append=append, -2978 into=Qualify, -2979 dialect=dialect, -2980 copy=copy, -2981 **opts, -2982 ) -2983 -2984 def distinct( -2985 self, *ons: t.Optional[ExpOrStr], distinct: bool = True, copy: bool = True -2986 ) -> Select: -2987 """ -2988 Set the OFFSET expression. -2989 -2990 Example: -2991 >>> Select().from_("tbl").select("x").distinct().sql() -2992 'SELECT DISTINCT x FROM tbl' -2993 -2994 Args: -2995 ons: the expressions to distinct on -2996 distinct: whether the Select should be distinct -2997 copy: if `False`, modify this expression instance in-place. -2998 -2999 Returns: -3000 Select: the modified expression. -3001 """ -3002 instance = _maybe_copy(self, copy) -3003 on = Tuple(expressions=[maybe_parse(on, copy=copy) for on in ons if on]) if ons else None -3004 instance.set("distinct", Distinct(on=on) if distinct else None) -3005 return instance -3006 -3007 def ctas( -3008 self, -3009 table: ExpOrStr, -3010 properties: t.Optional[t.Dict] = None, -3011 dialect: DialectType = None, -3012 copy: bool = True, -3013 **opts, -3014 ) -> Create: -3015 """ -3016 Convert this expression to a CREATE TABLE AS statement. +2918 def having( +2919 self, +2920 *expressions: t.Optional[ExpOrStr], +2921 append: bool = True, +2922 dialect: DialectType = None, +2923 copy: bool = True, +2924 **opts, +2925 ) -> Select: +2926 """ +2927 Append to or set the HAVING expressions. +2928 +2929 Example: +2930 >>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql() +2931 'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3' +2932 +2933 Args: +2934 *expressions: the SQL code strings to parse. +2935 If an `Expression` instance is passed, it will be used as-is. +2936 Multiple expressions are combined with an AND operator. +2937 append: if `True`, AND the new expressions to any existing expression. +2938 Otherwise, this resets the expression. +2939 dialect: the dialect used to parse the input expressions. +2940 copy: if `False`, modify this expression instance in-place. +2941 opts: other options to use to parse the input expressions. +2942 +2943 Returns: +2944 The modified Select expression. 
+2945 """ +2946 return _apply_conjunction_builder( +2947 *expressions, +2948 instance=self, +2949 arg="having", +2950 append=append, +2951 into=Having, +2952 dialect=dialect, +2953 copy=copy, +2954 **opts, +2955 ) +2956 +2957 def window( +2958 self, +2959 *expressions: t.Optional[ExpOrStr], +2960 append: bool = True, +2961 dialect: DialectType = None, +2962 copy: bool = True, +2963 **opts, +2964 ) -> Select: +2965 return _apply_list_builder( +2966 *expressions, +2967 instance=self, +2968 arg="windows", +2969 append=append, +2970 into=Window, +2971 dialect=dialect, +2972 copy=copy, +2973 **opts, +2974 ) +2975 +2976 def qualify( +2977 self, +2978 *expressions: t.Optional[ExpOrStr], +2979 append: bool = True, +2980 dialect: DialectType = None, +2981 copy: bool = True, +2982 **opts, +2983 ) -> Select: +2984 return _apply_conjunction_builder( +2985 *expressions, +2986 instance=self, +2987 arg="qualify", +2988 append=append, +2989 into=Qualify, +2990 dialect=dialect, +2991 copy=copy, +2992 **opts, +2993 ) +2994 +2995 def distinct( +2996 self, *ons: t.Optional[ExpOrStr], distinct: bool = True, copy: bool = True +2997 ) -> Select: +2998 """ +2999 Set the OFFSET expression. +3000 +3001 Example: +3002 >>> Select().from_("tbl").select("x").distinct().sql() +3003 'SELECT DISTINCT x FROM tbl' +3004 +3005 Args: +3006 ons: the expressions to distinct on +3007 distinct: whether the Select should be distinct +3008 copy: if `False`, modify this expression instance in-place. +3009 +3010 Returns: +3011 Select: the modified expression. +3012 """ +3013 instance = _maybe_copy(self, copy) +3014 on = Tuple(expressions=[maybe_parse(on, copy=copy) for on in ons if on]) if ons else None +3015 instance.set("distinct", Distinct(on=on) if distinct else None) +3016 return instance 3017 -3018 Example: -3019 >>> Select().select("*").from_("tbl").ctas("x").sql() -3020 'CREATE TABLE x AS SELECT * FROM tbl' -3021 -3022 Args: -3023 table: the SQL code string to parse as the table name. -3024 If another `Expression` instance is passed, it will be used as-is. -3025 properties: an optional mapping of table properties -3026 dialect: the dialect used to parse the input table. -3027 copy: if `False`, modify this expression instance in-place. -3028 opts: other options to use to parse the input table. -3029 -3030 Returns: -3031 The new Create expression. -3032 """ -3033 instance = _maybe_copy(self, copy) -3034 table_expression = maybe_parse( -3035 table, -3036 into=Table, -3037 dialect=dialect, -3038 **opts, -3039 ) -3040 properties_expression = None -3041 if properties: -3042 properties_expression = Properties.from_dict(properties) -3043 -3044 return Create( -3045 this=table_expression, -3046 kind="table", -3047 expression=instance, -3048 properties=properties_expression, -3049 ) -3050 -3051 def lock(self, update: bool = True, copy: bool = True) -> Select: -3052 """ -3053 Set the locking read mode for this expression. +3018 def ctas( +3019 self, +3020 table: ExpOrStr, +3021 properties: t.Optional[t.Dict] = None, +3022 dialect: DialectType = None, +3023 copy: bool = True, +3024 **opts, +3025 ) -> Create: +3026 """ +3027 Convert this expression to a CREATE TABLE AS statement. +3028 +3029 Example: +3030 >>> Select().select("*").from_("tbl").ctas("x").sql() +3031 'CREATE TABLE x AS SELECT * FROM tbl' +3032 +3033 Args: +3034 table: the SQL code string to parse as the table name. +3035 If another `Expression` instance is passed, it will be used as-is. 
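having() and distinct() as defined above, mirroring their doctests (a minimal sketch):

    from sqlglot import exp

    print(exp.Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql())
    # 'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'
    print(exp.Select().from_("tbl").select("x").distinct().sql())
    # 'SELECT DISTINCT x FROM tbl'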
+3036 properties: an optional mapping of table properties +3037 dialect: the dialect used to parse the input table. +3038 copy: if `False`, modify this expression instance in-place. +3039 opts: other options to use to parse the input table. +3040 +3041 Returns: +3042 The new Create expression. +3043 """ +3044 instance = _maybe_copy(self, copy) +3045 table_expression = maybe_parse( +3046 table, +3047 into=Table, +3048 dialect=dialect, +3049 **opts, +3050 ) +3051 properties_expression = None +3052 if properties: +3053 properties_expression = Properties.from_dict(properties) 3054 -3055 Examples: -3056 >>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql") -3057 "SELECT x FROM tbl WHERE x = 'a' FOR UPDATE" -3058 -3059 >>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql") -3060 "SELECT x FROM tbl WHERE x = 'a' FOR SHARE" +3055 return Create( +3056 this=table_expression, +3057 kind="table", +3058 expression=instance, +3059 properties=properties_expression, +3060 ) 3061 -3062 Args: -3063 update: if `True`, the locking type will be `FOR UPDATE`, else it will be `FOR SHARE`. -3064 copy: if `False`, modify this expression instance in-place. +3062 def lock(self, update: bool = True, copy: bool = True) -> Select: +3063 """ +3064 Set the locking read mode for this expression. 3065 -3066 Returns: -3067 The modified expression. -3068 """ +3066 Examples: +3067 >>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql") +3068 "SELECT x FROM tbl WHERE x = 'a' FOR UPDATE" 3069 -3070 inst = _maybe_copy(self, copy) -3071 inst.set("locks", [Lock(update=update)]) +3070 >>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql") +3071 "SELECT x FROM tbl WHERE x = 'a' FOR SHARE" 3072 -3073 return inst -3074 -3075 @property -3076 def named_selects(self) -> t.List[str]: -3077 return [e.output_name for e in self.expressions if e.alias_or_name] -3078 -3079 @property -3080 def is_star(self) -> bool: -3081 return any(expression.is_star for expression in self.expressions) +3073 Args: +3074 update: if `True`, the locking type will be `FOR UPDATE`, else it will be `FOR SHARE`. +3075 copy: if `False`, modify this expression instance in-place. +3076 +3077 Returns: +3078 The modified expression. +3079 """ +3080 inst = _maybe_copy(self, copy) +3081 inst.set("locks", [Lock(update=update)]) 3082 -3083 @property -3084 def selects(self) -> t.List[Expression]: -3085 return self.expressions -3086 -3087 -3088class Subquery(DerivedTable, Unionable): -3089 arg_types = { -3090 "this": True, -3091 "alias": False, -3092 "with": False, -3093 **QUERY_MODIFIERS, -3094 } -3095 -3096 def unnest(self): -3097 """ -3098 Returns the first non subquery. -3099 """ -3100 expression = self -3101 while isinstance(expression, Subquery): -3102 expression = expression.this -3103 return expression -3104 -3105 @property -3106 def is_star(self) -> bool: -3107 return self.this.is_star +3083 return inst +3084 +3085 def hint(self, *hints: ExpOrStr, dialect: DialectType = None, copy: bool = True) -> Select: +3086 """ +3087 Set hints for this expression. +3088 +3089 Examples: +3090 >>> Select().select("x").from_("tbl").hint("BROADCAST(y)").sql(dialect="spark") +3091 'SELECT /*+ BROADCAST(y) */ x FROM tbl' +3092 +3093 Args: +3094 hints: The SQL code strings to parse as the hints. +3095 If an `Expression` instance is passed, it will be used as-is. +3096 dialect: The dialect used to parse the hints. +3097 copy: If `False`, modify this expression instance in-place. 
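ctas() routes its optional properties dict through Properties.from_dict, and lock() wraps the select in a locking read; a sketch mirroring the doctests above:

    from sqlglot import exp

    query = exp.Select().select("*").from_("tbl")
    print(query.ctas("x").sql())
    # 'CREATE TABLE x AS SELECT * FROM tbl'
    print(query.where("x = 'a'").lock().sql("mysql"))
    # expected: "SELECT * FROM tbl WHERE x = 'a' FOR UPDATE"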
+3098 +3099 Returns: +3100 The modified expression. +3101 """ +3102 inst = _maybe_copy(self, copy) +3103 inst.set( +3104 "hint", Hint(expressions=[maybe_parse(h, copy=copy, dialect=dialect) for h in hints]) +3105 ) +3106 +3107 return inst 3108 3109 @property -3110 def output_name(self) -> str: -3111 return self.alias +3110 def named_selects(self) -> t.List[str]: +3111 return [e.output_name for e in self.expressions if e.alias_or_name] 3112 -3113 -3114class TableSample(Expression): -3115 arg_types = { -3116 "this": False, -3117 "method": False, -3118 "bucket_numerator": False, -3119 "bucket_denominator": False, -3120 "bucket_field": False, -3121 "percent": False, -3122 "rows": False, -3123 "size": False, -3124 "seed": False, -3125 "kind": False, -3126 } -3127 -3128 -3129class Tag(Expression): -3130 """Tags are used for generating arbitrary sql like SELECT <span>x</span>.""" -3131 -3132 arg_types = { -3133 "this": False, -3134 "prefix": False, -3135 "postfix": False, -3136 } -3137 +3113 @property +3114 def is_star(self) -> bool: +3115 return any(expression.is_star for expression in self.expressions) +3116 +3117 @property +3118 def selects(self) -> t.List[Expression]: +3119 return self.expressions +3120 +3121 +3122class Subquery(DerivedTable, Unionable): +3123 arg_types = { +3124 "this": True, +3125 "alias": False, +3126 "with": False, +3127 **QUERY_MODIFIERS, +3128 } +3129 +3130 def unnest(self): +3131 """ +3132 Returns the first non subquery. +3133 """ +3134 expression = self +3135 while isinstance(expression, Subquery): +3136 expression = expression.this +3137 return expression 3138 -3139# Represents both the standard SQL PIVOT operator and DuckDB's "simplified" PIVOT syntax -3140# https://duckdb.org/docs/sql/statements/pivot -3141class Pivot(Expression): -3142 arg_types = { -3143 "this": False, -3144 "alias": False, -3145 "expressions": True, -3146 "field": False, -3147 "unpivot": False, -3148 "using": False, -3149 "group": False, -3150 "columns": False, -3151 } -3152 -3153 -3154class Window(Expression): -3155 arg_types = { -3156 "this": True, -3157 "partition_by": False, -3158 "order": False, -3159 "spec": False, -3160 "alias": False, -3161 "over": False, -3162 "first": False, -3163 } -3164 +3139 @property +3140 def is_star(self) -> bool: +3141 return self.this.is_star +3142 +3143 @property +3144 def output_name(self) -> str: +3145 return self.alias +3146 +3147 +3148class TableSample(Expression): +3149 arg_types = { +3150 "this": False, +3151 "method": False, +3152 "bucket_numerator": False, +3153 "bucket_denominator": False, +3154 "bucket_field": False, +3155 "percent": False, +3156 "rows": False, +3157 "size": False, +3158 "seed": False, +3159 "kind": False, +3160 } +3161 +3162 +3163class Tag(Expression): +3164 """Tags are used for generating arbitrary sql like SELECT <span>x</span>.""" 3165 -3166class WindowSpec(Expression): -3167 arg_types = { -3168 "kind": False, -3169 "start": False, -3170 "start_side": False, -3171 "end": False, -3172 "end_side": False, -3173 } -3174 -3175 -3176class Where(Expression): -3177 pass -3178 -3179 -3180class Star(Expression): -3181 arg_types = {"except": False, "replace": False} -3182 -3183 @property -3184 def name(self) -> str: -3185 return "*" +3166 arg_types = { +3167 "this": False, +3168 "prefix": False, +3169 "postfix": False, +3170 } +3171 +3172 +3173# Represents both the standard SQL PIVOT operator and DuckDB's "simplified" PIVOT syntax +3174# https://duckdb.org/docs/sql/statements/pivot +3175class Pivot(Expression): +3176 arg_types = { +3177 
"this": False, +3178 "alias": False, +3179 "expressions": True, +3180 "field": False, +3181 "unpivot": False, +3182 "using": False, +3183 "group": False, +3184 "columns": False, +3185 } 3186 -3187 @property -3188 def output_name(self) -> str: -3189 return self.name -3190 -3191 -3192class Parameter(Expression): -3193 arg_types = {"this": True, "wrapped": False} -3194 -3195 -3196class SessionParameter(Expression): -3197 arg_types = {"this": True, "kind": False} +3187 +3188class Window(Expression): +3189 arg_types = { +3190 "this": True, +3191 "partition_by": False, +3192 "order": False, +3193 "spec": False, +3194 "alias": False, +3195 "over": False, +3196 "first": False, +3197 } 3198 3199 -3200class Placeholder(Expression): -3201 arg_types = {"this": False, "kind": False} -3202 -3203 -3204class Null(Condition): -3205 arg_types: t.Dict[str, t.Any] = {} -3206 -3207 @property -3208 def name(self) -> str: -3209 return "NULL" -3210 -3211 -3212class Boolean(Condition): -3213 pass -3214 -3215 -3216class DataTypeSize(Expression): -3217 arg_types = {"this": True, "expression": False} -3218 -3219 -3220class DataType(Expression): -3221 arg_types = { -3222 "this": True, -3223 "expressions": False, -3224 "nested": False, -3225 "values": False, -3226 "prefix": False, -3227 } +3200class WindowSpec(Expression): +3201 arg_types = { +3202 "kind": False, +3203 "start": False, +3204 "start_side": False, +3205 "end": False, +3206 "end_side": False, +3207 } +3208 +3209 +3210class Where(Expression): +3211 pass +3212 +3213 +3214class Star(Expression): +3215 arg_types = {"except": False, "replace": False} +3216 +3217 @property +3218 def name(self) -> str: +3219 return "*" +3220 +3221 @property +3222 def output_name(self) -> str: +3223 return self.name +3224 +3225 +3226class Parameter(Expression): +3227 arg_types = {"this": True, "wrapped": False} 3228 -3229 class Type(AutoName): -3230 ARRAY = auto() -3231 BIGDECIMAL = auto() -3232 BIGINT = auto() -3233 BIGSERIAL = auto() -3234 BINARY = auto() -3235 BIT = auto() -3236 BOOLEAN = auto() -3237 CHAR = auto() -3238 DATE = auto() -3239 DATETIME = auto() -3240 DATETIME64 = auto() -3241 INT4RANGE = auto() -3242 INT4MULTIRANGE = auto() -3243 INT8RANGE = auto() -3244 INT8MULTIRANGE = auto() -3245 NUMRANGE = auto() -3246 NUMMULTIRANGE = auto() -3247 TSRANGE = auto() -3248 TSMULTIRANGE = auto() -3249 TSTZRANGE = auto() -3250 TSTZMULTIRANGE = auto() -3251 DATERANGE = auto() -3252 DATEMULTIRANGE = auto() -3253 DECIMAL = auto() -3254 DOUBLE = auto() -3255 FLOAT = auto() -3256 GEOGRAPHY = auto() -3257 GEOMETRY = auto() -3258 HLLSKETCH = auto() -3259 HSTORE = auto() -3260 IMAGE = auto() -3261 INET = auto() -3262 INT = auto() -3263 INT128 = auto() -3264 INT256 = auto() -3265 INTERVAL = auto() -3266 JSON = auto() -3267 JSONB = auto() -3268 LONGBLOB = auto() -3269 LONGTEXT = auto() -3270 MAP = auto() -3271 MEDIUMBLOB = auto() -3272 MEDIUMTEXT = auto() -3273 MONEY = auto() -3274 NCHAR = auto() -3275 NULL = auto() -3276 NULLABLE = auto() -3277 NVARCHAR = auto() -3278 OBJECT = auto() -3279 ROWVERSION = auto() -3280 SERIAL = auto() -3281 SMALLINT = auto() -3282 SMALLMONEY = auto() -3283 SMALLSERIAL = auto() -3284 STRUCT = auto() -3285 SUPER = auto() -3286 TEXT = auto() -3287 TIME = auto() -3288 TIMESTAMP = auto() -3289 TIMESTAMPTZ = auto() -3290 TIMESTAMPLTZ = auto() -3291 TINYINT = auto() -3292 UBIGINT = auto() -3293 UINT = auto() -3294 USMALLINT = auto() -3295 UTINYINT = auto() -3296 UNKNOWN = auto() # Sentinel value, useful for type annotation -3297 UINT128 = auto() -3298 UINT256 = 
auto() -3299 UNIQUEIDENTIFIER = auto() -3300 UUID = auto() -3301 VARBINARY = auto() -3302 VARCHAR = auto() -3303 VARIANT = auto() -3304 XML = auto() -3305 -3306 TEXT_TYPES = { -3307 Type.CHAR, -3308 Type.NCHAR, -3309 Type.VARCHAR, -3310 Type.NVARCHAR, -3311 Type.TEXT, -3312 } -3313 -3314 INTEGER_TYPES = { -3315 Type.INT, -3316 Type.TINYINT, -3317 Type.SMALLINT, -3318 Type.BIGINT, -3319 Type.INT128, -3320 Type.INT256, -3321 } -3322 -3323 FLOAT_TYPES = { -3324 Type.FLOAT, -3325 Type.DOUBLE, -3326 } -3327 -3328 NUMERIC_TYPES = {*INTEGER_TYPES, *FLOAT_TYPES} -3329 -3330 TEMPORAL_TYPES = { -3331 Type.TIMESTAMP, -3332 Type.TIMESTAMPTZ, -3333 Type.TIMESTAMPLTZ, -3334 Type.DATE, -3335 Type.DATETIME, -3336 Type.DATETIME64, -3337 } -3338 -3339 @classmethod -3340 def build( -3341 cls, dtype: str | DataType | DataType.Type, dialect: DialectType = None, **kwargs -3342 ) -> DataType: -3343 from sqlglot import parse_one -3344 -3345 if isinstance(dtype, str): -3346 if dtype.upper() in cls.Type.__members__: -3347 data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type[dtype.upper()]) -3348 else: -3349 data_type_exp = parse_one(dtype, read=dialect, into=DataType) -3350 -3351 if data_type_exp is None: -3352 raise ValueError(f"Unparsable data type value: {dtype}") -3353 elif isinstance(dtype, DataType.Type): -3354 data_type_exp = DataType(this=dtype) -3355 elif isinstance(dtype, DataType): -3356 return dtype -3357 else: -3358 raise ValueError(f"Invalid data type: {type(dtype)}. Expected str or DataType.Type") -3359 -3360 return DataType(**{**data_type_exp.args, **kwargs}) -3361 -3362 def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool: -3363 return any(self.this == DataType.build(dtype).this for dtype in dtypes) -3364 +3229 +3230class SessionParameter(Expression): +3231 arg_types = {"this": True, "kind": False} +3232 +3233 +3234class Placeholder(Expression): +3235 arg_types = {"this": False, "kind": False} +3236 +3237 +3238class Null(Condition): +3239 arg_types: t.Dict[str, t.Any] = {} +3240 +3241 @property +3242 def name(self) -> str: +3243 return "NULL" +3244 +3245 +3246class Boolean(Condition): +3247 pass +3248 +3249 +3250class DataTypeSize(Expression): +3251 arg_types = {"this": True, "expression": False} +3252 +3253 +3254class DataType(Expression): +3255 arg_types = { +3256 "this": True, +3257 "expressions": False, +3258 "nested": False, +3259 "values": False, +3260 "prefix": False, +3261 } +3262 +3263 class Type(AutoName): +3264 ARRAY = auto() +3265 BIGDECIMAL = auto() +3266 BIGINT = auto() +3267 BIGSERIAL = auto() +3268 BINARY = auto() +3269 BIT = auto() +3270 BOOLEAN = auto() +3271 CHAR = auto() +3272 DATE = auto() +3273 DATETIME = auto() +3274 DATETIME64 = auto() +3275 ENUM = auto() +3276 INT4RANGE = auto() +3277 INT4MULTIRANGE = auto() +3278 INT8RANGE = auto() +3279 INT8MULTIRANGE = auto() +3280 NUMRANGE = auto() +3281 NUMMULTIRANGE = auto() +3282 TSRANGE = auto() +3283 TSMULTIRANGE = auto() +3284 TSTZRANGE = auto() +3285 TSTZMULTIRANGE = auto() +3286 DATERANGE = auto() +3287 DATEMULTIRANGE = auto() +3288 DECIMAL = auto() +3289 DOUBLE = auto() +3290 FLOAT = auto() +3291 GEOGRAPHY = auto() +3292 GEOMETRY = auto() +3293 HLLSKETCH = auto() +3294 HSTORE = auto() +3295 IMAGE = auto() +3296 INET = auto() +3297 INT = auto() +3298 INT128 = auto() +3299 INT256 = auto() +3300 INTERVAL = auto() +3301 JSON = auto() +3302 JSONB = auto() +3303 LONGBLOB = auto() +3304 LONGTEXT = auto() +3305 MAP = auto() +3306 MEDIUMBLOB = auto() +3307 MEDIUMTEXT = auto() +3308 MONEY = auto() +3309 NCHAR 
= auto() +3310 NULL = auto() +3311 NULLABLE = auto() +3312 NVARCHAR = auto() +3313 OBJECT = auto() +3314 ROWVERSION = auto() +3315 SERIAL = auto() +3316 SET = auto() +3317 SMALLINT = auto() +3318 SMALLMONEY = auto() +3319 SMALLSERIAL = auto() +3320 STRUCT = auto() +3321 SUPER = auto() +3322 TEXT = auto() +3323 TIME = auto() +3324 TIMESTAMP = auto() +3325 TIMESTAMPTZ = auto() +3326 TIMESTAMPLTZ = auto() +3327 TINYINT = auto() +3328 UBIGINT = auto() +3329 UINT = auto() +3330 USMALLINT = auto() +3331 UTINYINT = auto() +3332 UNKNOWN = auto() # Sentinel value, useful for type annotation +3333 UINT128 = auto() +3334 UINT256 = auto() +3335 UNIQUEIDENTIFIER = auto() +3336 UUID = auto() +3337 VARBINARY = auto() +3338 VARCHAR = auto() +3339 VARIANT = auto() +3340 XML = auto() +3341 +3342 TEXT_TYPES = { +3343 Type.CHAR, +3344 Type.NCHAR, +3345 Type.VARCHAR, +3346 Type.NVARCHAR, +3347 Type.TEXT, +3348 } +3349 +3350 INTEGER_TYPES = { +3351 Type.INT, +3352 Type.TINYINT, +3353 Type.SMALLINT, +3354 Type.BIGINT, +3355 Type.INT128, +3356 Type.INT256, +3357 } +3358 +3359 FLOAT_TYPES = { +3360 Type.FLOAT, +3361 Type.DOUBLE, +3362 } +3363 +3364 NUMERIC_TYPES = {*INTEGER_TYPES, *FLOAT_TYPES} 3365 -3366# https://www.postgresql.org/docs/15/datatype-pseudo.html -3367class PseudoType(Expression): -3368 pass -3369 -3370 -3371# WHERE x <OP> EXISTS|ALL|ANY|SOME(SELECT ...) -3372class SubqueryPredicate(Predicate): -3373 pass -3374 +3366 TEMPORAL_TYPES = { +3367 Type.TIME, +3368 Type.TIMESTAMP, +3369 Type.TIMESTAMPTZ, +3370 Type.TIMESTAMPLTZ, +3371 Type.DATE, +3372 Type.DATETIME, +3373 Type.DATETIME64, +3374 } 3375 -3376class All(SubqueryPredicate): -3377 pass -3378 -3379 -3380class Any(SubqueryPredicate): -3381 pass -3382 -3383 -3384class Exists(SubqueryPredicate): -3385 pass -3386 +3376 @classmethod +3377 def build( +3378 cls, dtype: str | DataType | DataType.Type, dialect: DialectType = None, **kwargs +3379 ) -> DataType: +3380 from sqlglot import parse_one +3381 +3382 if isinstance(dtype, str): +3383 if dtype.upper() == "UNKNOWN": +3384 data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type.UNKNOWN) +3385 else: +3386 data_type_exp = parse_one(dtype, read=dialect, into=DataType) 3387 -3388# Commands to interact with the databases or engines. For most of the command -3389# expressions we parse whatever comes after the command's name as a string. -3390class Command(Expression): -3391 arg_types = {"this": True, "expression": False} -3392 -3393 -3394class Transaction(Expression): -3395 arg_types = {"this": False, "modes": False} +3388 if data_type_exp is None: +3389 raise ValueError(f"Unparsable data type value: {dtype}") +3390 elif isinstance(dtype, DataType.Type): +3391 data_type_exp = DataType(this=dtype) +3392 elif isinstance(dtype, DataType): +3393 return dtype +3394 else: +3395 raise ValueError(f"Invalid data type: {type(dtype)}. 
Expected str or DataType.Type") 3396 -3397 -3398class Commit(Expression): -3399 arg_types = {"chain": False} -3400 +3397 return DataType(**{**data_type_exp.args, **kwargs}) +3398 +3399 def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool: +3400 return any(self.this == DataType.build(dtype).this for dtype in dtypes) 3401 -3402class Rollback(Expression): -3403 arg_types = {"savepoint": False} -3404 -3405 -3406class AlterTable(Expression): -3407 arg_types = {"this": True, "actions": True, "exists": False} -3408 -3409 -3410class AddConstraint(Expression): -3411 arg_types = {"this": False, "expression": False, "enforced": False} +3402 +3403# https://www.postgresql.org/docs/15/datatype-pseudo.html +3404class PseudoType(Expression): +3405 pass +3406 +3407 +3408# WHERE x <OP> EXISTS|ALL|ANY|SOME(SELECT ...) +3409class SubqueryPredicate(Predicate): +3410 pass +3411 3412 -3413 -3414class DropPartition(Expression): -3415 arg_types = {"expressions": True, "exists": False} +3413class All(SubqueryPredicate): +3414 pass +3415 3416 -3417 -3418# Binary expressions like (ADD a b) -3419class Binary(Condition): -3420 arg_types = {"this": True, "expression": True} -3421 -3422 @property -3423 def left(self): -3424 return self.this -3425 -3426 @property -3427 def right(self): -3428 return self.expression +3417class Any(SubqueryPredicate): +3418 pass +3419 +3420 +3421class Exists(SubqueryPredicate): +3422 pass +3423 +3424 +3425# Commands to interact with the databases or engines. For most of the command +3426# expressions we parse whatever comes after the command's name as a string. +3427class Command(Expression): +3428 arg_types = {"this": True, "expression": False} 3429 3430 -3431class Add(Binary): -3432 pass +3431class Transaction(Expression): +3432 arg_types = {"this": False, "modes": False} 3433 3434 -3435class Connector(Binary): -3436 pass +3435class Commit(Expression): +3436 arg_types = {"chain": False} 3437 3438 -3439class And(Connector): -3440 pass +3439class Rollback(Expression): +3440 arg_types = {"savepoint": False} 3441 3442 -3443class Or(Connector): -3444 pass +3443class AlterTable(Expression): +3444 arg_types = {"this": True, "actions": True, "exists": False} 3445 3446 -3447class BitwiseAnd(Binary): -3448 pass +3447class AddConstraint(Expression): +3448 arg_types = {"this": False, "expression": False, "enforced": False} 3449 3450 -3451class BitwiseLeftShift(Binary): -3452 pass +3451class DropPartition(Expression): +3452 arg_types = {"expressions": True, "exists": False} 3453 3454 -3455class BitwiseOr(Binary): -3456 pass -3457 +3455# Binary expressions like (ADD a b) +3456class Binary(Condition): +3457 arg_types = {"this": True, "expression": True} 3458 -3459class BitwiseRightShift(Binary): -3460 pass -3461 +3459 @property +3460 def left(self): +3461 return self.this 3462 -3463class BitwiseXor(Binary): -3464 pass -3465 +3463 @property +3464 def right(self): +3465 return self.expression 3466 -3467class Div(Binary): -3468 pass -3469 +3467 +3468class Add(Binary): +3469 pass 3470 -3471class Overlaps(Binary): -3472 pass -3473 +3471 +3472class Connector(Binary): +3473 pass 3474 -3475class Dot(Binary): -3476 @property -3477 def name(self) -> str: -3478 return self.expression.name +3475 +3476class And(Connector): +3477 pass +3478 3479 -3480 @classmethod -3481 def build(self, expressions: t.Sequence[Expression]) -> Dot: -3482 """Build a Dot object with a sequence of expressions.""" -3483 if len(expressions) < 2: -3484 raise ValueError(f"Dot requires >= 2 expressions.") -3485 -3486 a, b, 
*expressions = expressions -3487 dot = Dot(this=a, expression=b) -3488 -3489 for expression in expressions: -3490 dot = Dot(this=dot, expression=expression) +3480class Or(Connector): +3481 pass +3482 +3483 +3484class BitwiseAnd(Binary): +3485 pass +3486 +3487 +3488class BitwiseLeftShift(Binary): +3489 pass +3490 3491 -3492 return dot -3493 +3492class BitwiseOr(Binary): +3493 pass 3494 -3495class DPipe(Binary): -3496 pass -3497 +3495 +3496class BitwiseRightShift(Binary): +3497 pass 3498 -3499class EQ(Binary, Predicate): -3500 pass -3501 +3499 +3500class BitwiseXor(Binary): +3501 pass 3502 -3503class NullSafeEQ(Binary, Predicate): -3504 pass -3505 +3503 +3504class Div(Binary): +3505 pass 3506 -3507class NullSafeNEQ(Binary, Predicate): -3508 pass -3509 +3507 +3508class Overlaps(Binary): +3509 pass 3510 -3511class Distance(Binary): -3512 pass -3513 -3514 -3515class Escape(Binary): -3516 pass -3517 -3518 -3519class Glob(Binary, Predicate): -3520 pass -3521 -3522 -3523class GT(Binary, Predicate): -3524 pass -3525 +3511 +3512class Dot(Binary): +3513 @property +3514 def name(self) -> str: +3515 return self.expression.name +3516 +3517 @property +3518 def output_name(self) -> str: +3519 return self.name +3520 +3521 @classmethod +3522 def build(self, expressions: t.Sequence[Expression]) -> Dot: +3523 """Build a Dot object with a sequence of expressions.""" +3524 if len(expressions) < 2: +3525 raise ValueError(f"Dot requires >= 2 expressions.") 3526 -3527class GTE(Binary, Predicate): -3528 pass +3527 a, b, *expressions = expressions +3528 dot = Dot(this=a, expression=b) 3529 -3530 -3531class ILike(Binary, Predicate): -3532 pass -3533 +3530 for expression in expressions: +3531 dot = Dot(this=dot, expression=expression) +3532 +3533 return dot 3534 -3535class ILikeAny(Binary, Predicate): -3536 pass -3537 +3535 +3536class DPipe(Binary): +3537 pass 3538 -3539class IntDiv(Binary): -3540 pass -3541 +3539 +3540class SafeDPipe(DPipe): +3541 pass 3542 -3543class Is(Binary, Predicate): -3544 pass -3545 +3543 +3544class EQ(Binary, Predicate): +3545 pass 3546 -3547class Kwarg(Binary): -3548 """Kwarg in special functions like func(kwarg => y).""" -3549 +3547 +3548class NullSafeEQ(Binary, Predicate): +3549 pass 3550 -3551class Like(Binary, Predicate): -3552 pass -3553 +3551 +3552class NullSafeNEQ(Binary, Predicate): +3553 pass 3554 -3555class LikeAny(Binary, Predicate): -3556 pass -3557 +3555 +3556class Distance(Binary): +3557 pass 3558 -3559class LT(Binary, Predicate): -3560 pass -3561 +3559 +3560class Escape(Binary): +3561 pass 3562 -3563class LTE(Binary, Predicate): -3564 pass -3565 +3563 +3564class Glob(Binary, Predicate): +3565 pass 3566 -3567class Mod(Binary): -3568 pass -3569 +3567 +3568class GT(Binary, Predicate): +3569 pass 3570 -3571class Mul(Binary): -3572 pass -3573 +3571 +3572class GTE(Binary, Predicate): +3573 pass 3574 -3575class NEQ(Binary, Predicate): -3576 pass -3577 +3575 +3576class ILike(Binary, Predicate): +3577 pass 3578 -3579class SimilarTo(Binary, Predicate): -3580 pass -3581 +3579 +3580class ILikeAny(Binary, Predicate): +3581 pass 3582 -3583class Slice(Binary): -3584 arg_types = {"this": False, "expression": False} -3585 +3583 +3584class IntDiv(Binary): +3585 pass 3586 -3587class Sub(Binary): -3588 pass -3589 +3587 +3588class Is(Binary, Predicate): +3589 pass 3590 -3591class ArrayOverlaps(Binary): -3592 pass -3593 +3591 +3592class Kwarg(Binary): +3593 """Kwarg in special functions like func(kwarg => y).""" 3594 -3595# Unary Expressions -3596# (NOT a) -3597class Unary(Condition): -3598 pass 
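As an illustration of the Dot.build classmethod in this hunk: it left-folds a sequence of expressions into nested Dot nodes. A minimal sketch, assuming only that sqlglot is importable and using the to_identifier helper this module also exposes:

from sqlglot import expressions as exp

# Dot.build folds left-to-right, so three identifiers become Dot(Dot(a, b), c),
# which should render as the dotted path "a.b.c".
path = exp.Dot.build([exp.to_identifier(name) for name in ("a", "b", "c")])
print(path.sql())  # expected: a.b.c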
+3595 +3596class Like(Binary, Predicate): +3597 pass +3598 3599 -3600 -3601class BitwiseNot(Unary): -3602 pass +3600class LikeAny(Binary, Predicate): +3601 pass +3602 3603 -3604 -3605class Not(Unary): -3606 pass +3604class LT(Binary, Predicate): +3605 pass +3606 3607 -3608 -3609class Paren(Unary): -3610 arg_types = {"this": True, "with": False} +3608class LTE(Binary, Predicate): +3609 pass +3610 3611 -3612 -3613class Neg(Unary): -3614 pass +3612class Mod(Binary): +3613 pass +3614 3615 -3616 -3617class Alias(Expression): -3618 arg_types = {"this": True, "alias": False} +3616class Mul(Binary): +3617 pass +3618 3619 -3620 @property -3621 def output_name(self) -> str: -3622 return self.alias +3620class NEQ(Binary, Predicate): +3621 pass +3622 3623 -3624 -3625class Aliases(Expression): -3626 arg_types = {"this": True, "expressions": True} +3624class SimilarTo(Binary, Predicate): +3625 pass +3626 3627 -3628 @property -3629 def aliases(self): -3630 return self.expressions +3628class Slice(Binary): +3629 arg_types = {"this": False, "expression": False} +3630 3631 -3632 -3633class AtTimeZone(Expression): -3634 arg_types = {"this": True, "zone": True} +3632class Sub(Binary): +3633 pass +3634 3635 -3636 -3637class Between(Predicate): -3638 arg_types = {"this": True, "low": True, "high": True} +3636class ArrayOverlaps(Binary): +3637 pass +3638 3639 -3640 -3641class Bracket(Condition): -3642 arg_types = {"this": True, "expressions": True} -3643 +3640# Unary Expressions +3641# (NOT a) +3642class Unary(Condition): +3643 pass 3644 -3645class Distinct(Expression): -3646 arg_types = {"expressions": False, "on": False} -3647 +3645 +3646class BitwiseNot(Unary): +3647 pass 3648 -3649class In(Predicate): -3650 arg_types = { -3651 "this": True, -3652 "expressions": False, -3653 "query": False, -3654 "unnest": False, -3655 "field": False, -3656 "is_global": False, -3657 } -3658 -3659 -3660class TimeUnit(Expression): -3661 """Automatically converts unit arg into a var.""" -3662 -3663 arg_types = {"unit": False} +3649 +3650class Not(Unary): +3651 pass +3652 +3653 +3654class Paren(Unary): +3655 arg_types = {"this": True, "with": False} +3656 +3657 @property +3658 def output_name(self) -> str: +3659 return self.this.name +3660 +3661 +3662class Neg(Unary): +3663 pass 3664 -3665 def __init__(self, **args): -3666 unit = args.get("unit") -3667 if isinstance(unit, (Column, Literal)): -3668 args["unit"] = Var(this=unit.name) -3669 elif isinstance(unit, Week): -3670 unit.set("this", Var(this=unit.this.name)) -3671 -3672 super().__init__(**args) +3665 +3666class Alias(Expression): +3667 arg_types = {"this": True, "alias": False} +3668 +3669 @property +3670 def output_name(self) -> str: +3671 return self.alias +3672 3673 -3674 -3675class Interval(TimeUnit): -3676 arg_types = {"this": False, "unit": False} -3677 -3678 @property -3679 def unit(self) -> t.Optional[Var]: -3680 return self.args.get("unit") +3674class Aliases(Expression): +3675 arg_types = {"this": True, "expressions": True} +3676 +3677 @property +3678 def aliases(self): +3679 return self.expressions +3680 3681 -3682 -3683class IgnoreNulls(Expression): -3684 pass +3682class AtTimeZone(Expression): +3683 arg_types = {"this": True, "zone": True} +3684 3685 -3686 -3687class RespectNulls(Expression): -3688 pass +3686class Between(Predicate): +3687 arg_types = {"this": True, "low": True, "high": True} +3688 3689 -3690 -3691# Functions -3692class Func(Condition): -3693 """ -3694 The base class for all function expressions. 
-3695 -3696 Attributes: -3697 is_var_len_args (bool): if set to True the last argument defined in arg_types will be -3698 treated as a variable length argument and the argument's value will be stored as a list. -3699 _sql_names (list): determines the SQL name (1st item in the list) and aliases (subsequent items) -3700 for this function expression. These values are used to map this node to a name during parsing -3701 as well as to provide the function's name during SQL string generation. By default the SQL -3702 name is set to the expression's class name transformed to snake case. -3703 """ -3704 -3705 is_var_len_args = False -3706 -3707 @classmethod -3708 def from_arg_list(cls, args): -3709 if cls.is_var_len_args: -3710 all_arg_keys = list(cls.arg_types) -3711 # If this function supports variable length argument treat the last argument as such. -3712 non_var_len_arg_keys = all_arg_keys[:-1] if cls.is_var_len_args else all_arg_keys -3713 num_non_var = len(non_var_len_arg_keys) -3714 -3715 args_dict = {arg_key: arg for arg, arg_key in zip(args, non_var_len_arg_keys)} -3716 args_dict[all_arg_keys[-1]] = args[num_non_var:] -3717 else: -3718 args_dict = {arg_key: arg for arg, arg_key in zip(args, cls.arg_types)} -3719 -3720 return cls(**args_dict) -3721 -3722 @classmethod -3723 def sql_names(cls): -3724 if cls is Func: -3725 raise NotImplementedError( -3726 "SQL name is only supported by concrete function implementations" -3727 ) -3728 if "_sql_names" not in cls.__dict__: -3729 cls._sql_names = [camel_to_snake_case(cls.__name__)] -3730 return cls._sql_names +3690class Bracket(Condition): +3691 arg_types = {"this": True, "expressions": True} +3692 +3693 +3694class Distinct(Expression): +3695 arg_types = {"expressions": False, "on": False} +3696 +3697 +3698class In(Predicate): +3699 arg_types = { +3700 "this": True, +3701 "expressions": False, +3702 "query": False, +3703 "unnest": False, +3704 "field": False, +3705 "is_global": False, +3706 } +3707 +3708 +3709class TimeUnit(Expression): +3710 """Automatically converts unit arg into a var.""" +3711 +3712 arg_types = {"unit": False} +3713 +3714 def __init__(self, **args): +3715 unit = args.get("unit") +3716 if isinstance(unit, (Column, Literal)): +3717 args["unit"] = Var(this=unit.name) +3718 elif isinstance(unit, Week): +3719 unit.set("this", Var(this=unit.this.name)) +3720 +3721 super().__init__(**args) +3722 +3723 +3724class Interval(TimeUnit): +3725 arg_types = {"this": False, "unit": False} +3726 +3727 @property +3728 def unit(self) -> t.Optional[Var]: +3729 return self.args.get("unit") +3730 3731 -3732 @classmethod -3733 def sql_name(cls): -3734 return cls.sql_names()[0] +3732class IgnoreNulls(Expression): +3733 pass +3734 3735 -3736 @classmethod -3737 def default_parser_mappings(cls): -3738 return {name: cls.from_arg_list for name in cls.sql_names()} +3736class RespectNulls(Expression): +3737 pass +3738 3739 -3740 -3741class AggFunc(Func): -3742 pass -3743 +3740# Functions +3741class Func(Condition): +3742 """ +3743 The base class for all function expressions. 
3744 -3745class ParameterizedAgg(AggFunc): -3746 arg_types = {"this": True, "expressions": True, "params": True} -3747 -3748 -3749class Abs(Func): -3750 pass -3751 -3752 -3753class Anonymous(Func): -3754 arg_types = {"this": True, "expressions": False} -3755 is_var_len_args = True -3756 -3757 -3758# https://docs.snowflake.com/en/sql-reference/functions/hll -3759# https://docs.aws.amazon.com/redshift/latest/dg/r_HLL_function.html -3760class Hll(AggFunc): -3761 arg_types = {"this": True, "expressions": False} -3762 is_var_len_args = True +3745 Attributes: +3746 is_var_len_args (bool): if set to True the last argument defined in arg_types will be +3747 treated as a variable length argument and the argument's value will be stored as a list. +3748 _sql_names (list): determines the SQL name (1st item in the list) and aliases (subsequent items) +3749 for this function expression. These values are used to map this node to a name during parsing +3750 as well as to provide the function's name during SQL string generation. By default the SQL +3751 name is set to the expression's class name transformed to snake case. +3752 """ +3753 +3754 is_var_len_args = False +3755 +3756 @classmethod +3757 def from_arg_list(cls, args): +3758 if cls.is_var_len_args: +3759 all_arg_keys = list(cls.arg_types) +3760 # If this function supports variable length argument treat the last argument as such. +3761 non_var_len_arg_keys = all_arg_keys[:-1] if cls.is_var_len_args else all_arg_keys +3762 num_non_var = len(non_var_len_arg_keys) 3763 -3764 -3765class ApproxDistinct(AggFunc): -3766 arg_types = {"this": True, "accuracy": False} -3767 _sql_names = ["APPROX_DISTINCT", "APPROX_COUNT_DISTINCT"] +3764 args_dict = {arg_key: arg for arg, arg_key in zip(args, non_var_len_arg_keys)} +3765 args_dict[all_arg_keys[-1]] = args[num_non_var:] +3766 else: +3767 args_dict = {arg_key: arg for arg, arg_key in zip(args, cls.arg_types)} 3768 -3769 -3770class Array(Func): -3771 arg_types = {"expressions": False} -3772 is_var_len_args = True -3773 -3774 -3775# https://docs.snowflake.com/en/sql-reference/functions/to_char -3776class ToChar(Func): -3777 arg_types = {"this": True, "format": False} -3778 -3779 -3780class GenerateSeries(Func): -3781 arg_types = {"start": True, "end": True, "step": False} -3782 -3783 -3784class ArrayAgg(AggFunc): -3785 pass -3786 -3787 -3788class ArrayAll(Func): -3789 arg_types = {"this": True, "expression": True} -3790 -3791 -3792class ArrayAny(Func): -3793 arg_types = {"this": True, "expression": True} -3794 -3795 -3796class ArrayConcat(Func): -3797 arg_types = {"this": True, "expressions": False} -3798 is_var_len_args = True -3799 +3769 return cls(**args_dict) +3770 +3771 @classmethod +3772 def sql_names(cls): +3773 if cls is Func: +3774 raise NotImplementedError( +3775 "SQL name is only supported by concrete function implementations" +3776 ) +3777 if "_sql_names" not in cls.__dict__: +3778 cls._sql_names = [camel_to_snake_case(cls.__name__)] +3779 return cls._sql_names +3780 +3781 @classmethod +3782 def sql_name(cls): +3783 return cls.sql_names()[0] +3784 +3785 @classmethod +3786 def default_parser_mappings(cls): +3787 return {name: cls.from_arg_list for name in cls.sql_names()} +3788 +3789 +3790class AggFunc(Func): +3791 pass +3792 +3793 +3794class ParameterizedAgg(AggFunc): +3795 arg_types = {"this": True, "expressions": True, "params": True} +3796 +3797 +3798class Abs(Func): +3799 pass 3800 -3801class ArrayContains(Binary, Func): -3802 pass -3803 -3804 -3805class ArrayContained(Binary): -3806 pass -3807 
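To make the Func plumbing above concrete: _sql_names supplies the SQL name and its aliases, and from_arg_list maps positional arguments onto arg_types, collecting the tail into a list for variable-length functions. A minimal sketch, assuming sqlglot is importable:

from sqlglot import expressions as exp

# ApproxDistinct declares both APPROX_DISTINCT and APPROX_COUNT_DISTINCT.
print(exp.ApproxDistinct.sql_names())  # expected: ['APPROX_DISTINCT', 'APPROX_COUNT_DISTINCT']
print(exp.ApproxDistinct.sql_name())   # expected: 'APPROX_DISTINCT'

# Array is var-len, so every positional argument lands in its "expressions" list.
arr = exp.Array.from_arg_list([exp.Literal.number(1), exp.Literal.number(2)])
print(arr.sql())  # expected: ARRAY(1, 2)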
-3808 -3809class ArrayFilter(Func): -3810 arg_types = {"this": True, "expression": True} -3811 _sql_names = ["FILTER", "ARRAY_FILTER"] +3801 +3802class Anonymous(Func): +3803 arg_types = {"this": True, "expressions": False} +3804 is_var_len_args = True +3805 +3806 +3807# https://docs.snowflake.com/en/sql-reference/functions/hll +3808# https://docs.aws.amazon.com/redshift/latest/dg/r_HLL_function.html +3809class Hll(AggFunc): +3810 arg_types = {"this": True, "expressions": False} +3811 is_var_len_args = True 3812 3813 -3814class ArrayJoin(Func): -3815 arg_types = {"this": True, "expression": True, "null": False} -3816 +3814class ApproxDistinct(AggFunc): +3815 arg_types = {"this": True, "accuracy": False} +3816 _sql_names = ["APPROX_DISTINCT", "APPROX_COUNT_DISTINCT"] 3817 -3818class ArraySize(Func): -3819 arg_types = {"this": True, "expression": False} -3820 -3821 -3822class ArraySort(Func): -3823 arg_types = {"this": True, "expression": False} -3824 -3825 -3826class ArraySum(Func): -3827 pass +3818 +3819class Array(Func): +3820 arg_types = {"expressions": False} +3821 is_var_len_args = True +3822 +3823 +3824# https://docs.snowflake.com/en/sql-reference/functions/to_char +3825class ToChar(Func): +3826 arg_types = {"this": True, "format": False} +3827 3828 -3829 -3830class ArrayUnionAgg(AggFunc): -3831 pass +3829class GenerateSeries(Func): +3830 arg_types = {"start": True, "end": True, "step": False} +3831 3832 -3833 -3834class Avg(AggFunc): -3835 pass +3833class ArrayAgg(AggFunc): +3834 pass +3835 3836 -3837 -3838class AnyValue(AggFunc): -3839 pass +3837class ArrayAll(Func): +3838 arg_types = {"this": True, "expression": True} +3839 3840 -3841 -3842class Case(Func): -3843 arg_types = {"this": False, "ifs": True, "default": False} +3841class ArrayAny(Func): +3842 arg_types = {"this": True, "expression": True} +3843 3844 -3845 def when(self, condition: ExpOrStr, then: ExpOrStr, copy: bool = True, **opts) -> Case: -3846 instance = _maybe_copy(self, copy) -3847 instance.append( -3848 "ifs", -3849 If( -3850 this=maybe_parse(condition, copy=copy, **opts), -3851 true=maybe_parse(then, copy=copy, **opts), -3852 ), -3853 ) -3854 return instance -3855 -3856 def else_(self, condition: ExpOrStr, copy: bool = True, **opts) -> Case: -3857 instance = _maybe_copy(self, copy) -3858 instance.set("default", maybe_parse(condition, copy=copy, **opts)) -3859 return instance -3860 +3845class ArrayConcat(Func): +3846 arg_types = {"this": True, "expressions": False} +3847 is_var_len_args = True +3848 +3849 +3850class ArrayContains(Binary, Func): +3851 pass +3852 +3853 +3854class ArrayContained(Binary): +3855 pass +3856 +3857 +3858class ArrayFilter(Func): +3859 arg_types = {"this": True, "expression": True} +3860 _sql_names = ["FILTER", "ARRAY_FILTER"] 3861 -3862class Cast(Func): -3863 arg_types = {"this": True, "to": True} -3864 -3865 @property -3866 def name(self) -> str: -3867 return self.this.name -3868 -3869 @property -3870 def to(self) -> DataType: -3871 return self.args["to"] -3872 -3873 @property -3874 def output_name(self) -> str: -3875 return self.name -3876 -3877 def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool: -3878 return self.to.is_type(*dtypes) -3879 -3880 -3881class CastToStrType(Func): -3882 arg_types = {"this": True, "expression": True} -3883 -3884 -3885class Collate(Binary): -3886 pass -3887 -3888 -3889class TryCast(Cast): -3890 pass -3891 -3892 -3893class Ceil(Func): -3894 arg_types = {"this": True, "decimals": False} -3895 _sql_names = ["CEIL", "CEILING"] -3896 -3897 
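The Case builder methods shown here accept SQL strings or expressions (both go through maybe_parse), so a CASE expression can be assembled fluently. A minimal sketch, assuming sqlglot is importable:

from sqlglot import expressions as exp

# Each when() appends an If node to "ifs"; else_() fills in "default".
case = (
    exp.Case()
    .when("x = 1", "'one'")
    .when("x = 2", "'two'")
    .else_("'other'")
)
print(case.sql())
# expected: CASE WHEN x = 1 THEN 'one' WHEN x = 2 THEN 'two' ELSE 'other' END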
-3898class Coalesce(Func): -3899 arg_types = {"this": True, "expressions": False} -3900 is_var_len_args = True -3901 -3902 -3903class Concat(Func): -3904 arg_types = {"expressions": True} -3905 is_var_len_args = True -3906 -3907 -3908class ConcatWs(Concat): -3909 _sql_names = ["CONCAT_WS"] +3862 +3863class ArrayJoin(Func): +3864 arg_types = {"this": True, "expression": True, "null": False} +3865 +3866 +3867class ArraySize(Func): +3868 arg_types = {"this": True, "expression": False} +3869 +3870 +3871class ArraySort(Func): +3872 arg_types = {"this": True, "expression": False} +3873 +3874 +3875class ArraySum(Func): +3876 pass +3877 +3878 +3879class ArrayUnionAgg(AggFunc): +3880 pass +3881 +3882 +3883class Avg(AggFunc): +3884 pass +3885 +3886 +3887class AnyValue(AggFunc): +3888 pass +3889 +3890 +3891class Case(Func): +3892 arg_types = {"this": False, "ifs": True, "default": False} +3893 +3894 def when(self, condition: ExpOrStr, then: ExpOrStr, copy: bool = True, **opts) -> Case: +3895 instance = _maybe_copy(self, copy) +3896 instance.append( +3897 "ifs", +3898 If( +3899 this=maybe_parse(condition, copy=copy, **opts), +3900 true=maybe_parse(then, copy=copy, **opts), +3901 ), +3902 ) +3903 return instance +3904 +3905 def else_(self, condition: ExpOrStr, copy: bool = True, **opts) -> Case: +3906 instance = _maybe_copy(self, copy) +3907 instance.set("default", maybe_parse(condition, copy=copy, **opts)) +3908 return instance +3909 3910 -3911 -3912class Count(AggFunc): -3913 arg_types = {"this": False} -3914 -3915 -3916class CountIf(AggFunc): -3917 pass -3918 -3919 -3920class CurrentDate(Func): -3921 arg_types = {"this": False} -3922 -3923 -3924class CurrentDatetime(Func): -3925 arg_types = {"this": False} -3926 -3927 -3928class CurrentTime(Func): -3929 arg_types = {"this": False} -3930 -3931 -3932class CurrentTimestamp(Func): -3933 arg_types = {"this": False} -3934 -3935 -3936class CurrentUser(Func): -3937 arg_types = {"this": False} -3938 -3939 -3940class DateAdd(Func, TimeUnit): -3941 arg_types = {"this": True, "expression": True, "unit": False} -3942 -3943 -3944class DateSub(Func, TimeUnit): -3945 arg_types = {"this": True, "expression": True, "unit": False} +3911class Cast(Func): +3912 arg_types = {"this": True, "to": True} +3913 +3914 @property +3915 def name(self) -> str: +3916 return self.this.name +3917 +3918 @property +3919 def to(self) -> DataType: +3920 return self.args["to"] +3921 +3922 @property +3923 def output_name(self) -> str: +3924 return self.name +3925 +3926 def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool: +3927 return self.to.is_type(*dtypes) +3928 +3929 +3930class CastToStrType(Func): +3931 arg_types = {"this": True, "expression": True} +3932 +3933 +3934class Collate(Binary): +3935 pass +3936 +3937 +3938class TryCast(Cast): +3939 pass +3940 +3941 +3942class Ceil(Func): +3943 arg_types = {"this": True, "decimals": False} +3944 _sql_names = ["CEIL", "CEILING"] +3945 3946 -3947 -3948class DateDiff(Func, TimeUnit): -3949 _sql_names = ["DATEDIFF", "DATE_DIFF"] -3950 arg_types = {"this": True, "expression": True, "unit": False} +3947class Coalesce(Func): +3948 arg_types = {"this": True, "expressions": False} +3949 is_var_len_args = True +3950 _sql_names = ["COALESCE", "IFNULL", "NVL"] 3951 3952 -3953class DateTrunc(Func): -3954 arg_types = {"unit": True, "this": True, "zone": False} -3955 +3953class Concat(Func): +3954 arg_types = {"expressions": True} +3955 is_var_len_args = True 3956 -3957class DatetimeAdd(Func, TimeUnit): -3958 arg_types = {"this": True, 
"expression": True, "unit": False} -3959 +3957 +3958class SafeConcat(Concat): +3959 pass 3960 -3961class DatetimeSub(Func, TimeUnit): -3962 arg_types = {"this": True, "expression": True, "unit": False} -3963 +3961 +3962class ConcatWs(Concat): +3963 _sql_names = ["CONCAT_WS"] 3964 -3965class DatetimeDiff(Func, TimeUnit): -3966 arg_types = {"this": True, "expression": True, "unit": False} -3967 -3968 -3969class DatetimeTrunc(Func, TimeUnit): -3970 arg_types = {"this": True, "unit": True, "zone": False} -3971 -3972 -3973class DayOfWeek(Func): -3974 _sql_names = ["DAY_OF_WEEK", "DAYOFWEEK"] -3975 -3976 -3977class DayOfMonth(Func): -3978 _sql_names = ["DAY_OF_MONTH", "DAYOFMONTH"] -3979 -3980 -3981class DayOfYear(Func): -3982 _sql_names = ["DAY_OF_YEAR", "DAYOFYEAR"] -3983 -3984 -3985class WeekOfYear(Func): -3986 _sql_names = ["WEEK_OF_YEAR", "WEEKOFYEAR"] -3987 -3988 -3989class LastDateOfMonth(Func): -3990 pass -3991 -3992 -3993class Extract(Func): -3994 arg_types = {"this": True, "expression": True} -3995 -3996 -3997class TimestampAdd(Func, TimeUnit): -3998 arg_types = {"this": True, "expression": True, "unit": False} -3999 -4000 -4001class TimestampSub(Func, TimeUnit): -4002 arg_types = {"this": True, "expression": True, "unit": False} -4003 -4004 -4005class TimestampDiff(Func, TimeUnit): -4006 arg_types = {"this": True, "expression": True, "unit": False} +3965 +3966class Count(AggFunc): +3967 arg_types = {"this": False, "expressions": False} +3968 is_var_len_args = True +3969 +3970 +3971class CountIf(AggFunc): +3972 pass +3973 +3974 +3975class CurrentDate(Func): +3976 arg_types = {"this": False} +3977 +3978 +3979class CurrentDatetime(Func): +3980 arg_types = {"this": False} +3981 +3982 +3983class CurrentTime(Func): +3984 arg_types = {"this": False} +3985 +3986 +3987class CurrentTimestamp(Func): +3988 arg_types = {"this": False} +3989 +3990 +3991class CurrentUser(Func): +3992 arg_types = {"this": False} +3993 +3994 +3995class DateAdd(Func, TimeUnit): +3996 arg_types = {"this": True, "expression": True, "unit": False} +3997 +3998 +3999class DateSub(Func, TimeUnit): +4000 arg_types = {"this": True, "expression": True, "unit": False} +4001 +4002 +4003class DateDiff(Func, TimeUnit): +4004 _sql_names = ["DATEDIFF", "DATE_DIFF"] +4005 arg_types = {"this": True, "expression": True, "unit": False} +4006 4007 -4008 -4009class TimestampTrunc(Func, TimeUnit): -4010 arg_types = {"this": True, "unit": True, "zone": False} +4008class DateTrunc(Func): +4009 arg_types = {"unit": True, "this": True, "zone": False} +4010 4011 -4012 -4013class TimeAdd(Func, TimeUnit): -4014 arg_types = {"this": True, "expression": True, "unit": False} +4012class DatetimeAdd(Func, TimeUnit): +4013 arg_types = {"this": True, "expression": True, "unit": False} +4014 4015 -4016 -4017class TimeSub(Func, TimeUnit): -4018 arg_types = {"this": True, "expression": True, "unit": False} +4016class DatetimeSub(Func, TimeUnit): +4017 arg_types = {"this": True, "expression": True, "unit": False} +4018 4019 -4020 -4021class TimeDiff(Func, TimeUnit): -4022 arg_types = {"this": True, "expression": True, "unit": False} +4020class DatetimeDiff(Func, TimeUnit): +4021 arg_types = {"this": True, "expression": True, "unit": False} +4022 4023 -4024 -4025class TimeTrunc(Func, TimeUnit): -4026 arg_types = {"this": True, "unit": True, "zone": False} +4024class DatetimeTrunc(Func, TimeUnit): +4025 arg_types = {"this": True, "unit": True, "zone": False} +4026 4027 -4028 -4029class DateFromParts(Func): -4030 _sql_names = ["DATEFROMPARTS"] -4031 arg_types = 
{"year": True, "month": True, "day": True} -4032 -4033 -4034class DateStrToDate(Func): -4035 pass -4036 -4037 -4038class DateToDateStr(Func): -4039 pass -4040 -4041 -4042class DateToDi(Func): -4043 pass -4044 -4045 -4046class Day(Func): -4047 pass -4048 -4049 -4050class Decode(Func): -4051 arg_types = {"this": True, "charset": True, "replace": False} -4052 -4053 -4054class DiToDate(Func): -4055 pass -4056 -4057 -4058class Encode(Func): -4059 arg_types = {"this": True, "charset": True} -4060 -4061 -4062class Exp(Func): -4063 pass -4064 -4065 -4066class Explode(Func): -4067 pass -4068 -4069 -4070class Floor(Func): -4071 arg_types = {"this": True, "decimals": False} -4072 -4073 -4074class FromBase64(Func): -4075 pass -4076 -4077 -4078class ToBase64(Func): -4079 pass -4080 -4081 -4082class Greatest(Func): -4083 arg_types = {"this": True, "expressions": False} -4084 is_var_len_args = True -4085 -4086 -4087class GroupConcat(Func): -4088 arg_types = {"this": True, "separator": False} -4089 -4090 -4091class Hex(Func): -4092 pass -4093 -4094 -4095class If(Func): -4096 arg_types = {"this": True, "true": True, "false": False} -4097 -4098 -4099class IfNull(Func): -4100 arg_types = {"this": True, "expression": False} -4101 _sql_names = ["IFNULL", "NVL"] -4102 -4103 -4104class Initcap(Func): -4105 arg_types = {"this": True, "expression": False} -4106 -4107 -4108class JSONKeyValue(Expression): -4109 arg_types = {"this": True, "expression": True} -4110 -4111 -4112class JSONObject(Func): -4113 arg_types = { -4114 "expressions": False, -4115 "null_handling": False, -4116 "unique_keys": False, -4117 "return_type": False, -4118 "format_json": False, -4119 "encoding": False, -4120 } +4028class DayOfWeek(Func): +4029 _sql_names = ["DAY_OF_WEEK", "DAYOFWEEK"] +4030 +4031 +4032class DayOfMonth(Func): +4033 _sql_names = ["DAY_OF_MONTH", "DAYOFMONTH"] +4034 +4035 +4036class DayOfYear(Func): +4037 _sql_names = ["DAY_OF_YEAR", "DAYOFYEAR"] +4038 +4039 +4040class WeekOfYear(Func): +4041 _sql_names = ["WEEK_OF_YEAR", "WEEKOFYEAR"] +4042 +4043 +4044class LastDateOfMonth(Func): +4045 pass +4046 +4047 +4048class Extract(Func): +4049 arg_types = {"this": True, "expression": True} +4050 +4051 +4052class TimestampAdd(Func, TimeUnit): +4053 arg_types = {"this": True, "expression": True, "unit": False} +4054 +4055 +4056class TimestampSub(Func, TimeUnit): +4057 arg_types = {"this": True, "expression": True, "unit": False} +4058 +4059 +4060class TimestampDiff(Func, TimeUnit): +4061 arg_types = {"this": True, "expression": True, "unit": False} +4062 +4063 +4064class TimestampTrunc(Func, TimeUnit): +4065 arg_types = {"this": True, "unit": True, "zone": False} +4066 +4067 +4068class TimeAdd(Func, TimeUnit): +4069 arg_types = {"this": True, "expression": True, "unit": False} +4070 +4071 +4072class TimeSub(Func, TimeUnit): +4073 arg_types = {"this": True, "expression": True, "unit": False} +4074 +4075 +4076class TimeDiff(Func, TimeUnit): +4077 arg_types = {"this": True, "expression": True, "unit": False} +4078 +4079 +4080class TimeTrunc(Func, TimeUnit): +4081 arg_types = {"this": True, "unit": True, "zone": False} +4082 +4083 +4084class DateFromParts(Func): +4085 _sql_names = ["DATEFROMPARTS"] +4086 arg_types = {"year": True, "month": True, "day": True} +4087 +4088 +4089class DateStrToDate(Func): +4090 pass +4091 +4092 +4093class DateToDateStr(Func): +4094 pass +4095 +4096 +4097class DateToDi(Func): +4098 pass +4099 +4100 +4101class Date(Func): +4102 arg_types = {"expressions": True} +4103 is_var_len_args = True +4104 +4105 
+4106class Day(Func): +4107 pass +4108 +4109 +4110class Decode(Func): +4111 arg_types = {"this": True, "charset": True, "replace": False} +4112 +4113 +4114class DiToDate(Func): +4115 pass +4116 +4117 +4118class Encode(Func): +4119 arg_types = {"this": True, "charset": True} +4120 4121 -4122 -4123class OpenJSONColumnDef(Expression): -4124 arg_types = {"this": True, "kind": True, "path": False, "as_json": False} +4122class Exp(Func): +4123 pass +4124 4125 -4126 -4127class OpenJSON(Func): -4128 arg_types = {"this": True, "path": False, "expressions": False} +4126class Explode(Func): +4127 pass +4128 4129 -4130 -4131class JSONBContains(Binary): -4132 _sql_names = ["JSONB_CONTAINS"] +4130class Floor(Func): +4131 arg_types = {"this": True, "decimals": False} +4132 4133 -4134 -4135class JSONExtract(Binary, Func): -4136 _sql_names = ["JSON_EXTRACT"] +4134class FromBase64(Func): +4135 pass +4136 4137 -4138 -4139class JSONExtractScalar(JSONExtract): -4140 _sql_names = ["JSON_EXTRACT_SCALAR"] +4138class ToBase64(Func): +4139 pass +4140 4141 -4142 -4143class JSONBExtract(JSONExtract): -4144 _sql_names = ["JSONB_EXTRACT"] +4142class Greatest(Func): +4143 arg_types = {"this": True, "expressions": False} +4144 is_var_len_args = True 4145 4146 -4147class JSONBExtractScalar(JSONExtract): -4148 _sql_names = ["JSONB_EXTRACT_SCALAR"] +4147class GroupConcat(Func): +4148 arg_types = {"this": True, "separator": False} 4149 4150 -4151class JSONFormat(Func): -4152 arg_types = {"this": False, "options": False} -4153 _sql_names = ["JSON_FORMAT"] +4151class Hex(Func): +4152 pass +4153 4154 -4155 -4156class Least(Func): -4157 arg_types = {"expressions": False} -4158 is_var_len_args = True -4159 -4160 -4161class Left(Func): -4162 arg_types = {"this": True, "expression": True} -4163 -4164 -4165class Right(Func): -4166 arg_types = {"this": True, "expression": True} -4167 -4168 -4169class Length(Func): -4170 pass -4171 -4172 -4173class Levenshtein(Func): -4174 arg_types = { -4175 "this": True, -4176 "expression": False, -4177 "ins_cost": False, -4178 "del_cost": False, -4179 "sub_cost": False, -4180 } +4155class If(Func): +4156 arg_types = {"this": True, "true": True, "false": False} +4157 +4158 +4159class Initcap(Func): +4160 arg_types = {"this": True, "expression": False} +4161 +4162 +4163class JSONKeyValue(Expression): +4164 arg_types = {"this": True, "expression": True} +4165 +4166 +4167class JSONObject(Func): +4168 arg_types = { +4169 "expressions": False, +4170 "null_handling": False, +4171 "unique_keys": False, +4172 "return_type": False, +4173 "format_json": False, +4174 "encoding": False, +4175 } +4176 +4177 +4178class OpenJSONColumnDef(Expression): +4179 arg_types = {"this": True, "kind": True, "path": False, "as_json": False} +4180 4181 -4182 -4183class Ln(Func): -4184 pass +4182class OpenJSON(Func): +4183 arg_types = {"this": True, "path": False, "expressions": False} +4184 4185 -4186 -4187class Log(Func): -4188 arg_types = {"this": True, "expression": False} +4186class JSONBContains(Binary): +4187 _sql_names = ["JSONB_CONTAINS"] +4188 4189 -4190 -4191class Log2(Func): -4192 pass +4190class JSONExtract(Binary, Func): +4191 _sql_names = ["JSON_EXTRACT"] +4192 4193 -4194 -4195class Log10(Func): -4196 pass +4194class JSONExtractScalar(JSONExtract): +4195 _sql_names = ["JSON_EXTRACT_SCALAR"] +4196 4197 -4198 -4199class LogicalOr(AggFunc): -4200 _sql_names = ["LOGICAL_OR", "BOOL_OR", "BOOLOR_AGG"] +4198class JSONBExtract(JSONExtract): +4199 _sql_names = ["JSONB_EXTRACT"] +4200 4201 -4202 -4203class 
LogicalAnd(AggFunc): -4204 _sql_names = ["LOGICAL_AND", "BOOL_AND", "BOOLAND_AGG"] +4202class JSONBExtractScalar(JSONExtract): +4203 _sql_names = ["JSONB_EXTRACT_SCALAR"] +4204 4205 -4206 -4207class Lower(Func): -4208 _sql_names = ["LOWER", "LCASE"] +4206class JSONFormat(Func): +4207 arg_types = {"this": False, "options": False} +4208 _sql_names = ["JSON_FORMAT"] 4209 4210 -4211class Map(Func): -4212 arg_types = {"keys": False, "values": False} -4213 +4211class Least(Func): +4212 arg_types = {"expressions": False} +4213 is_var_len_args = True 4214 -4215class StarMap(Func): -4216 pass -4217 +4215 +4216class Left(Func): +4217 arg_types = {"this": True, "expression": True} 4218 -4219class VarMap(Func): -4220 arg_types = {"keys": True, "values": True} -4221 is_var_len_args = True +4219 +4220class Right(Func): +4221 arg_types = {"this": True, "expression": True} 4222 -4223 @property -4224 def keys(self) -> t.List[Expression]: -4225 return self.args["keys"].expressions +4223 +4224class Length(Func): +4225 _sql_names = ["LENGTH", "LEN"] 4226 -4227 @property -4228 def values(self) -> t.List[Expression]: -4229 return self.args["values"].expressions -4230 -4231 -4232# https://dev.mysql.com/doc/refman/8.0/en/fulltext-search.html -4233class MatchAgainst(Func): -4234 arg_types = {"this": True, "expressions": True, "modifier": False} -4235 +4227 +4228class Levenshtein(Func): +4229 arg_types = { +4230 "this": True, +4231 "expression": False, +4232 "ins_cost": False, +4233 "del_cost": False, +4234 "sub_cost": False, +4235 } 4236 -4237class Max(AggFunc): -4238 arg_types = {"this": True, "expressions": False} -4239 is_var_len_args = True +4237 +4238class Ln(Func): +4239 pass 4240 4241 -4242class MD5(Func): -4243 _sql_names = ["MD5"] +4242class Log(Func): +4243 arg_types = {"this": True, "expression": False} 4244 4245 -4246class Min(AggFunc): -4247 arg_types = {"this": True, "expressions": False} -4248 is_var_len_args = True +4246class Log2(Func): +4247 pass +4248 4249 -4250 -4251class Month(Func): -4252 pass +4250class Log10(Func): +4251 pass +4252 4253 -4254 -4255class Nvl2(Func): -4256 arg_types = {"this": True, "true": True, "false": False} +4254class LogicalOr(AggFunc): +4255 _sql_names = ["LOGICAL_OR", "BOOL_OR", "BOOLOR_AGG"] +4256 4257 -4258 -4259class Posexplode(Func): -4260 pass +4258class LogicalAnd(AggFunc): +4259 _sql_names = ["LOGICAL_AND", "BOOL_AND", "BOOLAND_AGG"] +4260 4261 -4262 -4263class Pow(Binary, Func): -4264 _sql_names = ["POWER", "POW"] +4262class Lower(Func): +4263 _sql_names = ["LOWER", "LCASE"] +4264 4265 -4266 -4267class PercentileCont(AggFunc): -4268 arg_types = {"this": True, "expression": False} +4266class Map(Func): +4267 arg_types = {"keys": False, "values": False} +4268 4269 -4270 -4271class PercentileDisc(AggFunc): -4272 arg_types = {"this": True, "expression": False} +4270class StarMap(Func): +4271 pass +4272 4273 -4274 -4275class Quantile(AggFunc): -4276 arg_types = {"this": True, "quantile": True} +4274class VarMap(Func): +4275 arg_types = {"keys": True, "values": True} +4276 is_var_len_args = True 4277 -4278 -4279class ApproxQuantile(Quantile): -4280 arg_types = {"this": True, "quantile": True, "accuracy": False, "weight": False} +4278 @property +4279 def keys(self) -> t.List[Expression]: +4280 return self.args["keys"].expressions 4281 -4282 -4283class RangeN(Func): -4284 arg_types = {"this": True, "expressions": True, "each": False} +4282 @property +4283 def values(self) -> t.List[Expression]: +4284 return self.args["values"].expressions 4285 4286 -4287class 
ReadCSV(Func): -4288 _sql_names = ["READ_CSV"] -4289 is_var_len_args = True -4290 arg_types = {"this": True, "expressions": False} +4287# https://dev.mysql.com/doc/refman/8.0/en/fulltext-search.html +4288class MatchAgainst(Func): +4289 arg_types = {"this": True, "expressions": True, "modifier": False} +4290 4291 -4292 -4293class Reduce(Func): -4294 arg_types = {"this": True, "initial": True, "merge": True, "finish": False} +4292class Max(AggFunc): +4293 arg_types = {"this": True, "expressions": False} +4294 is_var_len_args = True 4295 4296 -4297class RegexpExtract(Func): -4298 arg_types = { -4299 "this": True, -4300 "expression": True, -4301 "position": False, -4302 "occurrence": False, -4303 "group": False, -4304 } +4297class MD5(Func): +4298 _sql_names = ["MD5"] +4299 +4300 +4301class Min(AggFunc): +4302 arg_types = {"this": True, "expressions": False} +4303 is_var_len_args = True +4304 4305 -4306 -4307class RegexpLike(Func): -4308 arg_types = {"this": True, "expression": True, "flag": False} +4306class Month(Func): +4307 pass +4308 4309 -4310 -4311class RegexpILike(Func): -4312 arg_types = {"this": True, "expression": True, "flag": False} +4310class Nvl2(Func): +4311 arg_types = {"this": True, "true": True, "false": False} +4312 4313 -4314 -4315# https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.split.html -4316# limit is the number of times a pattern is applied -4317class RegexpSplit(Func): -4318 arg_types = {"this": True, "expression": True, "limit": False} -4319 +4314class Posexplode(Func): +4315 pass +4316 +4317 +4318class Pow(Binary, Func): +4319 _sql_names = ["POWER", "POW"] 4320 -4321class Repeat(Func): -4322 arg_types = {"this": True, "times": True} -4323 +4321 +4322class PercentileCont(AggFunc): +4323 arg_types = {"this": True, "expression": False} 4324 -4325class Round(Func): -4326 arg_types = {"this": True, "decimals": False} -4327 +4325 +4326class PercentileDisc(AggFunc): +4327 arg_types = {"this": True, "expression": False} 4328 -4329class RowNumber(Func): -4330 arg_types: t.Dict[str, t.Any] = {} -4331 +4329 +4330class Quantile(AggFunc): +4331 arg_types = {"this": True, "quantile": True} 4332 -4333class SafeDivide(Func): -4334 arg_types = {"this": True, "expression": True} -4335 +4333 +4334class ApproxQuantile(Quantile): +4335 arg_types = {"this": True, "quantile": True, "accuracy": False, "weight": False} 4336 -4337class SetAgg(AggFunc): -4338 pass -4339 +4337 +4338class RangeN(Func): +4339 arg_types = {"this": True, "expressions": True, "each": False} 4340 -4341class SHA(Func): -4342 _sql_names = ["SHA", "SHA1"] -4343 -4344 -4345class SHA2(Func): -4346 _sql_names = ["SHA2"] -4347 arg_types = {"this": True, "length": False} -4348 -4349 -4350class SortArray(Func): -4351 arg_types = {"this": True, "asc": False} -4352 -4353 -4354class Split(Func): -4355 arg_types = {"this": True, "expression": True, "limit": False} -4356 -4357 -4358# Start may be omitted in the case of postgres -4359# https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6 -4360class Substring(Func): -4361 arg_types = {"this": True, "start": False, "length": False} -4362 -4363 -4364class StandardHash(Func): -4365 arg_types = {"this": True, "expression": False} -4366 -4367 -4368class StrPosition(Func): -4369 arg_types = { -4370 "this": True, -4371 "substr": True, -4372 "position": False, -4373 "instance": False, -4374 } +4341 +4342class ReadCSV(Func): +4343 _sql_names = ["READ_CSV"] +4344 is_var_len_args = True +4345 arg_types = {"this": True, 
"expressions": False} +4346 +4347 +4348class Reduce(Func): +4349 arg_types = {"this": True, "initial": True, "merge": True, "finish": False} +4350 +4351 +4352class RegexpExtract(Func): +4353 arg_types = { +4354 "this": True, +4355 "expression": True, +4356 "position": False, +4357 "occurrence": False, +4358 "group": False, +4359 } +4360 +4361 +4362class RegexpLike(Func): +4363 arg_types = {"this": True, "expression": True, "flag": False} +4364 +4365 +4366class RegexpILike(Func): +4367 arg_types = {"this": True, "expression": True, "flag": False} +4368 +4369 +4370# https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.split.html +4371# limit is the number of times a pattern is applied +4372class RegexpSplit(Func): +4373 arg_types = {"this": True, "expression": True, "limit": False} +4374 4375 -4376 -4377class StrToDate(Func): -4378 arg_types = {"this": True, "format": True} +4376class Repeat(Func): +4377 arg_types = {"this": True, "times": True} +4378 4379 -4380 -4381class StrToTime(Func): -4382 arg_types = {"this": True, "format": True} +4380class Round(Func): +4381 arg_types = {"this": True, "decimals": False} +4382 4383 -4384 -4385# Spark allows unix_timestamp() -4386# https://spark.apache.org/docs/3.1.3/api/python/reference/api/pyspark.sql.functions.unix_timestamp.html -4387class StrToUnix(Func): -4388 arg_types = {"this": False, "format": False} -4389 +4384class RowNumber(Func): +4385 arg_types: t.Dict[str, t.Any] = {} +4386 +4387 +4388class SafeDivide(Func): +4389 arg_types = {"this": True, "expression": True} 4390 -4391class NumberToStr(Func): -4392 arg_types = {"this": True, "format": True} -4393 +4391 +4392class SetAgg(AggFunc): +4393 pass 4394 -4395class Struct(Func): -4396 arg_types = {"expressions": True} -4397 is_var_len_args = True +4395 +4396class SHA(Func): +4397 _sql_names = ["SHA", "SHA1"] 4398 4399 -4400class StructExtract(Func): -4401 arg_types = {"this": True, "expression": True} -4402 +4400class SHA2(Func): +4401 _sql_names = ["SHA2"] +4402 arg_types = {"this": True, "length": False} 4403 -4404class Sum(AggFunc): -4405 pass -4406 +4404 +4405class SortArray(Func): +4406 arg_types = {"this": True, "asc": False} 4407 -4408class Sqrt(Func): -4409 pass -4410 +4408 +4409class Split(Func): +4410 arg_types = {"this": True, "expression": True, "limit": False} 4411 -4412class Stddev(AggFunc): -4413 pass -4414 -4415 -4416class StddevPop(AggFunc): -4417 pass +4412 +4413# Start may be omitted in the case of postgres +4414# https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6 +4415class Substring(Func): +4416 arg_types = {"this": True, "start": False, "length": False} +4417 4418 -4419 -4420class StddevSamp(AggFunc): -4421 pass +4419class StandardHash(Func): +4420 arg_types = {"this": True, "expression": False} +4421 4422 -4423 -4424class TimeToStr(Func): -4425 arg_types = {"this": True, "format": True} -4426 -4427 -4428class TimeToTimeStr(Func): -4429 pass +4423class StrPosition(Func): +4424 arg_types = { +4425 "this": True, +4426 "substr": True, +4427 "position": False, +4428 "instance": False, +4429 } 4430 4431 -4432class TimeToUnix(Func): -4433 pass +4432class StrToDate(Func): +4433 arg_types = {"this": True, "format": True} 4434 4435 -4436class TimeStrToDate(Func): -4437 pass +4436class StrToTime(Func): +4437 arg_types = {"this": True, "format": True} 4438 4439 -4440class TimeStrToTime(Func): -4441 pass -4442 -4443 -4444class TimeStrToUnix(Func): -4445 pass -4446 -4447 -4448class Trim(Func): -4449 arg_types = { -4450 
"this": True, -4451 "expression": False, -4452 "position": False, -4453 "collation": False, -4454 } -4455 -4456 -4457class TsOrDsAdd(Func, TimeUnit): -4458 arg_types = {"this": True, "expression": True, "unit": False} -4459 -4460 -4461class TsOrDsToDateStr(Func): -4462 pass -4463 -4464 -4465class TsOrDsToDate(Func): -4466 arg_types = {"this": True, "format": False} -4467 -4468 -4469class TsOrDiToDi(Func): -4470 pass -4471 -4472 -4473class Unhex(Func): -4474 pass -4475 -4476 -4477class UnixToStr(Func): -4478 arg_types = {"this": True, "format": False} -4479 -4480 -4481# https://prestodb.io/docs/current/functions/datetime.html -4482# presto has weird zone/hours/minutes -4483class UnixToTime(Func): -4484 arg_types = {"this": True, "scale": False, "zone": False, "hours": False, "minutes": False} +4440# Spark allows unix_timestamp() +4441# https://spark.apache.org/docs/3.1.3/api/python/reference/api/pyspark.sql.functions.unix_timestamp.html +4442class StrToUnix(Func): +4443 arg_types = {"this": False, "format": False} +4444 +4445 +4446class NumberToStr(Func): +4447 arg_types = {"this": True, "format": True} +4448 +4449 +4450class FromBase(Func): +4451 arg_types = {"this": True, "expression": True} +4452 +4453 +4454class Struct(Func): +4455 arg_types = {"expressions": True} +4456 is_var_len_args = True +4457 +4458 +4459class StructExtract(Func): +4460 arg_types = {"this": True, "expression": True} +4461 +4462 +4463class Sum(AggFunc): +4464 pass +4465 +4466 +4467class Sqrt(Func): +4468 pass +4469 +4470 +4471class Stddev(AggFunc): +4472 pass +4473 +4474 +4475class StddevPop(AggFunc): +4476 pass +4477 +4478 +4479class StddevSamp(AggFunc): +4480 pass +4481 +4482 +4483class TimeToStr(Func): +4484 arg_types = {"this": True, "format": True} 4485 -4486 SECONDS = Literal.string("seconds") -4487 MILLIS = Literal.string("millis") -4488 MICROS = Literal.string("micros") +4486 +4487class TimeToTimeStr(Func): +4488 pass 4489 4490 -4491class UnixToTimeStr(Func): +4491class TimeToUnix(Func): 4492 pass 4493 4494 -4495class Upper(Func): -4496 _sql_names = ["UPPER", "UCASE"] +4495class TimeStrToDate(Func): +4496 pass 4497 4498 -4499class Variance(AggFunc): -4500 _sql_names = ["VARIANCE", "VARIANCE_SAMP", "VAR_SAMP"] +4499class TimeStrToTime(Func): +4500 pass 4501 4502 -4503class VariancePop(AggFunc): -4504 _sql_names = ["VARIANCE_POP", "VAR_POP"] +4503class TimeStrToUnix(Func): +4504 pass 4505 4506 -4507class Week(Func): -4508 arg_types = {"this": True, "mode": False} -4509 -4510 -4511class XMLTable(Func): -4512 arg_types = {"this": True, "passing": False, "columns": False, "by_ref": False} -4513 +4507class Trim(Func): +4508 arg_types = { +4509 "this": True, +4510 "expression": False, +4511 "position": False, +4512 "collation": False, +4513 } 4514 -4515class Year(Func): -4516 pass -4517 +4515 +4516class TsOrDsAdd(Func, TimeUnit): +4517 arg_types = {"this": True, "expression": True, "unit": False} 4518 -4519class Use(Expression): -4520 arg_types = {"this": True, "kind": False} -4521 +4519 +4520class TsOrDsToDateStr(Func): +4521 pass 4522 -4523class Merge(Expression): -4524 arg_types = {"this": True, "using": True, "on": True, "expressions": True} -4525 +4523 +4524class TsOrDsToDate(Func): +4525 arg_types = {"this": True, "format": False} 4526 -4527class When(Func): -4528 arg_types = {"matched": True, "source": False, "condition": False, "then": True} -4529 +4527 +4528class TsOrDiToDi(Func): +4529 pass 4530 -4531# https://docs.oracle.com/javadb/10.8.3.0/ref/rrefsqljnextvaluefor.html -4532# 
https://learn.microsoft.com/en-us/sql/t-sql/functions/next-value-for-transact-sql?view=sql-server-ver16 -4533class NextValueFor(Func): -4534 arg_types = {"this": True, "order": False} +4531 +4532class Unhex(Func): +4533 pass +4534 4535 -4536 -4537def _norm_arg(arg): -4538 return arg.lower() if type(arg) is str else arg +4536class UnixToStr(Func): +4537 arg_types = {"this": True, "format": False} +4538 4539 -4540 -4541ALL_FUNCTIONS = subclasses(__name__, Func, (AggFunc, Anonymous, Func)) -4542 -4543 -4544# Helpers -4545@t.overload -4546def maybe_parse( -4547 sql_or_expression: ExpOrStr, -4548 *, -4549 into: t.Type[E], -4550 dialect: DialectType = None, -4551 prefix: t.Optional[str] = None, -4552 copy: bool = False, -4553 **opts, -4554) -> E: -4555 ... +4540# https://prestodb.io/docs/current/functions/datetime.html +4541# presto has weird zone/hours/minutes +4542class UnixToTime(Func): +4543 arg_types = {"this": True, "scale": False, "zone": False, "hours": False, "minutes": False} +4544 +4545 SECONDS = Literal.string("seconds") +4546 MILLIS = Literal.string("millis") +4547 MICROS = Literal.string("micros") +4548 +4549 +4550class UnixToTimeStr(Func): +4551 pass +4552 +4553 +4554class Upper(Func): +4555 _sql_names = ["UPPER", "UCASE"] 4556 4557 -4558@t.overload -4559def maybe_parse( -4560 sql_or_expression: str | E, -4561 *, -4562 into: t.Optional[IntoType] = None, -4563 dialect: DialectType = None, -4564 prefix: t.Optional[str] = None, -4565 copy: bool = False, -4566 **opts, -4567) -> E: -4568 ... +4558class Variance(AggFunc): +4559 _sql_names = ["VARIANCE", "VARIANCE_SAMP", "VAR_SAMP"] +4560 +4561 +4562class VariancePop(AggFunc): +4563 _sql_names = ["VARIANCE_POP", "VAR_POP"] +4564 +4565 +4566class Week(Func): +4567 arg_types = {"this": True, "mode": False} +4568 4569 -4570 -4571def maybe_parse( -4572 sql_or_expression: ExpOrStr, -4573 *, -4574 into: t.Optional[IntoType] = None, -4575 dialect: DialectType = None, -4576 prefix: t.Optional[str] = None, -4577 copy: bool = False, -4578 **opts, -4579) -> Expression: -4580 """Gracefully handle a possible string or expression. +4570class XMLTable(Func): +4571 arg_types = {"this": True, "passing": False, "columns": False, "by_ref": False} +4572 +4573 +4574class Year(Func): +4575 pass +4576 +4577 +4578class Use(Expression): +4579 arg_types = {"this": True, "kind": False} +4580 4581 -4582 Example: -4583 >>> maybe_parse("1") -4584 (LITERAL this: 1, is_string: False) -4585 >>> maybe_parse(to_identifier("x")) -4586 (IDENTIFIER this: x, quoted: False) -4587 -4588 Args: -4589 sql_or_expression: the SQL code string or an expression -4590 into: the SQLGlot Expression to parse into -4591 dialect: the dialect used to parse the input expressions (in the case that an -4592 input expression is a SQL string). -4593 prefix: a string to prefix the sql with before it gets parsed -4594 (automatically includes a space) -4595 copy: whether or not to copy the expression. -4596 **opts: other options to use to parse the input expressions (again, in the case -4597 that an input expression is a SQL string). 
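A minimal usage sketch (not part of the upstream change; assumes sqlglot ~16.x and made-up column names) showing how the arg_types declared on the Func subclasses above drive node construction and SQL generation; exact reprs may differ between versions:

    from sqlglot import exp, parse_one

    # "this" is the required argument declared in Max.arg_types; _sql_names controls the
    # rendered name, and is_var_len_args lets extra positional arguments collect into
    # the "expressions" list when a function call is parsed.
    max_node = exp.Max(this=exp.column("price"))
    print(max_node.sql())          # MAX(price)

    # Parsing round-trips into the same classes, e.g. SHA2 declared above.
    tree = parse_one("SELECT SHA2(x, 256) FROM t")
    print(tree.find(exp.SHA2))     # the SHA2 node, with "length" populated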
+4582class Merge(Expression): +4583 arg_types = {"this": True, "using": True, "on": True, "expressions": True} +4584 +4585 +4586class When(Func): +4587 arg_types = {"matched": True, "source": False, "condition": False, "then": True} +4588 +4589 +4590# https://docs.oracle.com/javadb/10.8.3.0/ref/rrefsqljnextvaluefor.html +4591# https://learn.microsoft.com/en-us/sql/t-sql/functions/next-value-for-transact-sql?view=sql-server-ver16 +4592class NextValueFor(Func): +4593 arg_types = {"this": True, "order": False} +4594 +4595 +4596def _norm_arg(arg): +4597 return arg.lower() if type(arg) is str else arg 4598 -4599 Returns: -4600 Expression: the parsed or given expression. -4601 """ -4602 if isinstance(sql_or_expression, Expression): -4603 if copy: -4604 return sql_or_expression.copy() -4605 return sql_or_expression -4606 -4607 if sql_or_expression is None: -4608 raise ParseError(f"SQL cannot be None") -4609 -4610 import sqlglot -4611 -4612 sql = str(sql_or_expression) -4613 if prefix: -4614 sql = f"{prefix} {sql}" +4599 +4600ALL_FUNCTIONS = subclasses(__name__, Func, (AggFunc, Anonymous, Func)) +4601 +4602 +4603# Helpers +4604@t.overload +4605def maybe_parse( +4606 sql_or_expression: ExpOrStr, +4607 *, +4608 into: t.Type[E], +4609 dialect: DialectType = None, +4610 prefix: t.Optional[str] = None, +4611 copy: bool = False, +4612 **opts, +4613) -> E: +4614 ... 4615 -4616 return sqlglot.parse_one(sql, read=dialect, into=into, **opts) -4617 -4618 -4619def _maybe_copy(instance: E, copy: bool = True) -> E: -4620 return instance.copy() if copy else instance -4621 -4622 -4623def _is_wrong_expression(expression, into): -4624 return isinstance(expression, Expression) and not isinstance(expression, into) -4625 -4626 -4627def _apply_builder( -4628 expression, -4629 instance, -4630 arg, -4631 copy=True, -4632 prefix=None, -4633 into=None, -4634 dialect=None, -4635 **opts, -4636): -4637 if _is_wrong_expression(expression, into): -4638 expression = into(this=expression) -4639 instance = _maybe_copy(instance, copy) -4640 expression = maybe_parse( -4641 sql_or_expression=expression, -4642 prefix=prefix, -4643 into=into, -4644 dialect=dialect, -4645 **opts, -4646 ) -4647 instance.set(arg, expression) -4648 return instance -4649 -4650 -4651def _apply_child_list_builder( -4652 *expressions, -4653 instance, -4654 arg, -4655 append=True, -4656 copy=True, -4657 prefix=None, -4658 into=None, -4659 dialect=None, -4660 properties=None, -4661 **opts, -4662): -4663 instance = _maybe_copy(instance, copy) -4664 parsed = [] -4665 for expression in expressions: -4666 if expression is not None: -4667 if _is_wrong_expression(expression, into): -4668 expression = into(expressions=[expression]) -4669 -4670 expression = maybe_parse( -4671 expression, -4672 into=into, -4673 dialect=dialect, -4674 prefix=prefix, -4675 **opts, -4676 ) -4677 parsed.extend(expression.expressions) -4678 -4679 existing = instance.args.get(arg) -4680 if append and existing: -4681 parsed = existing.expressions + parsed -4682 -4683 child = into(expressions=parsed) -4684 for k, v in (properties or {}).items(): -4685 child.set(k, v) -4686 instance.set(arg, child) -4687 -4688 return instance -4689 -4690 -4691def _apply_list_builder( -4692 *expressions, -4693 instance, -4694 arg, -4695 append=True, -4696 copy=True, -4697 prefix=None, -4698 into=None, -4699 dialect=None, -4700 **opts, -4701): -4702 inst = _maybe_copy(instance, copy) -4703 -4704 expressions = [ -4705 maybe_parse( -4706 sql_or_expression=expression, -4707 into=into, -4708 prefix=prefix, -4709 
dialect=dialect, -4710 **opts, -4711 ) -4712 for expression in expressions -4713 if expression is not None -4714 ] -4715 -4716 existing_expressions = inst.args.get(arg) -4717 if append and existing_expressions: -4718 expressions = existing_expressions + expressions -4719 -4720 inst.set(arg, expressions) -4721 return inst -4722 -4723 -4724def _apply_conjunction_builder( -4725 *expressions, -4726 instance, -4727 arg, -4728 into=None, -4729 append=True, -4730 copy=True, -4731 dialect=None, -4732 **opts, -4733): -4734 expressions = [exp for exp in expressions if exp is not None and exp != ""] -4735 if not expressions: -4736 return instance +4616 +4617@t.overload +4618def maybe_parse( +4619 sql_or_expression: str | E, +4620 *, +4621 into: t.Optional[IntoType] = None, +4622 dialect: DialectType = None, +4623 prefix: t.Optional[str] = None, +4624 copy: bool = False, +4625 **opts, +4626) -> E: +4627 ... +4628 +4629 +4630def maybe_parse( +4631 sql_or_expression: ExpOrStr, +4632 *, +4633 into: t.Optional[IntoType] = None, +4634 dialect: DialectType = None, +4635 prefix: t.Optional[str] = None, +4636 copy: bool = False, +4637 **opts, +4638) -> Expression: +4639 """Gracefully handle a possible string or expression. +4640 +4641 Example: +4642 >>> maybe_parse("1") +4643 (LITERAL this: 1, is_string: False) +4644 >>> maybe_parse(to_identifier("x")) +4645 (IDENTIFIER this: x, quoted: False) +4646 +4647 Args: +4648 sql_or_expression: the SQL code string or an expression +4649 into: the SQLGlot Expression to parse into +4650 dialect: the dialect used to parse the input expressions (in the case that an +4651 input expression is a SQL string). +4652 prefix: a string to prefix the sql with before it gets parsed +4653 (automatically includes a space) +4654 copy: whether or not to copy the expression. +4655 **opts: other options to use to parse the input expressions (again, in the case +4656 that an input expression is a SQL string). +4657 +4658 Returns: +4659 Expression: the parsed or given expression. 
+4660 """ +4661 if isinstance(sql_or_expression, Expression): +4662 if copy: +4663 return sql_or_expression.copy() +4664 return sql_or_expression +4665 +4666 if sql_or_expression is None: +4667 raise ParseError(f"SQL cannot be None") +4668 +4669 import sqlglot +4670 +4671 sql = str(sql_or_expression) +4672 if prefix: +4673 sql = f"{prefix} {sql}" +4674 +4675 return sqlglot.parse_one(sql, read=dialect, into=into, **opts) +4676 +4677 +4678def _maybe_copy(instance: E, copy: bool = True) -> E: +4679 return instance.copy() if copy else instance +4680 +4681 +4682def _is_wrong_expression(expression, into): +4683 return isinstance(expression, Expression) and not isinstance(expression, into) +4684 +4685 +4686def _apply_builder( +4687 expression, +4688 instance, +4689 arg, +4690 copy=True, +4691 prefix=None, +4692 into=None, +4693 dialect=None, +4694 **opts, +4695): +4696 if _is_wrong_expression(expression, into): +4697 expression = into(this=expression) +4698 instance = _maybe_copy(instance, copy) +4699 expression = maybe_parse( +4700 sql_or_expression=expression, +4701 prefix=prefix, +4702 into=into, +4703 dialect=dialect, +4704 **opts, +4705 ) +4706 instance.set(arg, expression) +4707 return instance +4708 +4709 +4710def _apply_child_list_builder( +4711 *expressions, +4712 instance, +4713 arg, +4714 append=True, +4715 copy=True, +4716 prefix=None, +4717 into=None, +4718 dialect=None, +4719 properties=None, +4720 **opts, +4721): +4722 instance = _maybe_copy(instance, copy) +4723 parsed = [] +4724 for expression in expressions: +4725 if expression is not None: +4726 if _is_wrong_expression(expression, into): +4727 expression = into(expressions=[expression]) +4728 +4729 expression = maybe_parse( +4730 expression, +4731 into=into, +4732 dialect=dialect, +4733 prefix=prefix, +4734 **opts, +4735 ) +4736 parsed.extend(expression.expressions) 4737 -4738 inst = _maybe_copy(instance, copy) -4739 -4740 existing = inst.args.get(arg) -4741 if append and existing is not None: -4742 expressions = [existing.this if into else existing] + list(expressions) -4743 -4744 node = and_(*expressions, dialect=dialect, copy=copy, **opts) -4745 -4746 inst.set(arg, into(this=node) if into else node) -4747 return inst +4738 existing = instance.args.get(arg) +4739 if append and existing: +4740 parsed = existing.expressions + parsed +4741 +4742 child = into(expressions=parsed) +4743 for k, v in (properties or {}).items(): +4744 child.set(k, v) +4745 instance.set(arg, child) +4746 +4747 return instance 4748 4749 -4750def _apply_cte_builder( -4751 instance: E, -4752 alias: ExpOrStr, -4753 as_: ExpOrStr, -4754 recursive: t.Optional[bool] = None, -4755 append: bool = True, -4756 dialect: DialectType = None, -4757 copy: bool = True, -4758 **opts, -4759) -> E: -4760 alias_expression = maybe_parse(alias, dialect=dialect, into=TableAlias, **opts) -4761 as_expression = maybe_parse(as_, dialect=dialect, **opts) -4762 cte = CTE(this=as_expression, alias=alias_expression) -4763 return _apply_child_list_builder( -4764 cte, -4765 instance=instance, -4766 arg="with", -4767 append=append, -4768 copy=copy, -4769 into=With, -4770 properties={"recursive": recursive or False}, -4771 ) -4772 -4773 -4774def _combine( -4775 expressions: t.Sequence[t.Optional[ExpOrStr]], -4776 operator: t.Type[Connector], -4777 dialect: DialectType = None, -4778 copy: bool = True, -4779 **opts, -4780) -> Expression: -4781 conditions = [ -4782 condition(expression, dialect=dialect, copy=copy, **opts) -4783 for expression in expressions -4784 if expression is not None 
-4785 ] -4786 -4787 this, *rest = conditions -4788 if rest: -4789 this = _wrap(this, Connector) -4790 for expression in rest: -4791 this = operator(this=this, expression=_wrap(expression, Connector)) -4792 -4793 return this -4794 -4795 -4796def _wrap(expression: E, kind: t.Type[Expression]) -> E | Paren: -4797 return Paren(this=expression) if isinstance(expression, kind) else expression +4750def _apply_list_builder( +4751 *expressions, +4752 instance, +4753 arg, +4754 append=True, +4755 copy=True, +4756 prefix=None, +4757 into=None, +4758 dialect=None, +4759 **opts, +4760): +4761 inst = _maybe_copy(instance, copy) +4762 +4763 expressions = [ +4764 maybe_parse( +4765 sql_or_expression=expression, +4766 into=into, +4767 prefix=prefix, +4768 dialect=dialect, +4769 **opts, +4770 ) +4771 for expression in expressions +4772 if expression is not None +4773 ] +4774 +4775 existing_expressions = inst.args.get(arg) +4776 if append and existing_expressions: +4777 expressions = existing_expressions + expressions +4778 +4779 inst.set(arg, expressions) +4780 return inst +4781 +4782 +4783def _apply_conjunction_builder( +4784 *expressions, +4785 instance, +4786 arg, +4787 into=None, +4788 append=True, +4789 copy=True, +4790 dialect=None, +4791 **opts, +4792): +4793 expressions = [exp for exp in expressions if exp is not None and exp != ""] +4794 if not expressions: +4795 return instance +4796 +4797 inst = _maybe_copy(instance, copy) 4798 -4799 -4800def union( -4801 left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts -4802) -> Union: -4803 """ -4804 Initializes a syntax tree from one UNION expression. -4805 -4806 Example: -4807 >>> union("SELECT * FROM foo", "SELECT * FROM bla").sql() -4808 'SELECT * FROM foo UNION SELECT * FROM bla' -4809 -4810 Args: -4811 left: the SQL code string corresponding to the left-hand side. -4812 If an `Expression` instance is passed, it will be used as-is. -4813 right: the SQL code string corresponding to the right-hand side. -4814 If an `Expression` instance is passed, it will be used as-is. -4815 distinct: set the DISTINCT flag if and only if this is true. -4816 dialect: the dialect used to parse the input expression. -4817 opts: other options to use to parse the input expressions. -4818 -4819 Returns: -4820 The new Union instance. -4821 """ -4822 left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts) -4823 right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts) -4824 -4825 return Union(this=left, expression=right, distinct=distinct) -4826 -4827 -4828def intersect( -4829 left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts -4830) -> Intersect: -4831 """ -4832 Initializes a syntax tree from one INTERSECT expression. -4833 -4834 Example: -4835 >>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql() -4836 'SELECT * FROM foo INTERSECT SELECT * FROM bla' -4837 -4838 Args: -4839 left: the SQL code string corresponding to the left-hand side. -4840 If an `Expression` instance is passed, it will be used as-is. -4841 right: the SQL code string corresponding to the right-hand side. -4842 If an `Expression` instance is passed, it will be used as-is. -4843 distinct: set the DISTINCT flag if and only if this is true. -4844 dialect: the dialect used to parse the input expression. -4845 opts: other options to use to parse the input expressions. -4846 -4847 Returns: -4848 The new Intersect instance. 
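The private _apply_*_builder helpers above are what the public chaining API delegates to; a minimal sketch of the observable behavior, assuming the usual Select chaining methods (illustrative names, not part of the patch):

    from sqlglot import exp

    # _apply_list_builder: repeated .select() calls append projections.
    q = exp.select("a").select("b").from_("t")
    print(q.sql())     # SELECT a, b FROM t

    # _apply_cte_builder: .with_() parses the alias and body and prepends a WITH clause.
    q = exp.select("*").from_("cte").with_("cte", as_="SELECT 1 AS x")
    print(q.sql())     # WITH cte AS (SELECT 1 AS x) SELECT * FROM cte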
-4849 """ -4850 left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts) -4851 right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts) -4852 -4853 return Intersect(this=left, expression=right, distinct=distinct) +4799 existing = inst.args.get(arg) +4800 if append and existing is not None: +4801 expressions = [existing.this if into else existing] + list(expressions) +4802 +4803 node = and_(*expressions, dialect=dialect, copy=copy, **opts) +4804 +4805 inst.set(arg, into(this=node) if into else node) +4806 return inst +4807 +4808 +4809def _apply_cte_builder( +4810 instance: E, +4811 alias: ExpOrStr, +4812 as_: ExpOrStr, +4813 recursive: t.Optional[bool] = None, +4814 append: bool = True, +4815 dialect: DialectType = None, +4816 copy: bool = True, +4817 **opts, +4818) -> E: +4819 alias_expression = maybe_parse(alias, dialect=dialect, into=TableAlias, **opts) +4820 as_expression = maybe_parse(as_, dialect=dialect, **opts) +4821 cte = CTE(this=as_expression, alias=alias_expression) +4822 return _apply_child_list_builder( +4823 cte, +4824 instance=instance, +4825 arg="with", +4826 append=append, +4827 copy=copy, +4828 into=With, +4829 properties={"recursive": recursive or False}, +4830 ) +4831 +4832 +4833def _combine( +4834 expressions: t.Sequence[t.Optional[ExpOrStr]], +4835 operator: t.Type[Connector], +4836 dialect: DialectType = None, +4837 copy: bool = True, +4838 **opts, +4839) -> Expression: +4840 conditions = [ +4841 condition(expression, dialect=dialect, copy=copy, **opts) +4842 for expression in expressions +4843 if expression is not None +4844 ] +4845 +4846 this, *rest = conditions +4847 if rest: +4848 this = _wrap(this, Connector) +4849 for expression in rest: +4850 this = operator(this=this, expression=_wrap(expression, Connector)) +4851 +4852 return this +4853 4854 -4855 -4856def except_( -4857 left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts -4858) -> Except: -4859 """ -4860 Initializes a syntax tree from one EXCEPT expression. -4861 -4862 Example: -4863 >>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql() -4864 'SELECT * FROM foo EXCEPT SELECT * FROM bla' -4865 -4866 Args: -4867 left: the SQL code string corresponding to the left-hand side. -4868 If an `Expression` instance is passed, it will be used as-is. -4869 right: the SQL code string corresponding to the right-hand side. -4870 If an `Expression` instance is passed, it will be used as-is. -4871 distinct: set the DISTINCT flag if and only if this is true. -4872 dialect: the dialect used to parse the input expression. -4873 opts: other options to use to parse the input expressions. -4874 -4875 Returns: -4876 The new Except instance. -4877 """ -4878 left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts) -4879 right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts) -4880 -4881 return Except(this=left, expression=right, distinct=distinct) -4882 +4855def _wrap(expression: E, kind: t.Type[Expression]) -> E | Paren: +4856 return Paren(this=expression) if isinstance(expression, kind) else expression +4857 +4858 +4859def union( +4860 left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts +4861) -> Union: +4862 """ +4863 Initializes a syntax tree from one UNION expression. +4864 +4865 Example: +4866 >>> union("SELECT * FROM foo", "SELECT * FROM bla").sql() +4867 'SELECT * FROM foo UNION SELECT * FROM bla' +4868 +4869 Args: +4870 left: the SQL code string corresponding to the left-hand side. 
+4871 If an `Expression` instance is passed, it will be used as-is. +4872 right: the SQL code string corresponding to the right-hand side. +4873 If an `Expression` instance is passed, it will be used as-is. +4874 distinct: set the DISTINCT flag if and only if this is true. +4875 dialect: the dialect used to parse the input expression. +4876 opts: other options to use to parse the input expressions. +4877 +4878 Returns: +4879 The new Union instance. +4880 """ +4881 left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts) +4882 right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts) 4883 -4884def select(*expressions: ExpOrStr, dialect: DialectType = None, **opts) -> Select: -4885 """ -4886 Initializes a syntax tree from one or multiple SELECT expressions. -4887 -4888 Example: -4889 >>> select("col1", "col2").from_("tbl").sql() -4890 'SELECT col1, col2 FROM tbl' -4891 -4892 Args: -4893 *expressions: the SQL code string to parse as the expressions of a -4894 SELECT statement. If an Expression instance is passed, this is used as-is. -4895 dialect: the dialect used to parse the input expressions (in the case that an -4896 input expression is a SQL string). -4897 **opts: other options to use to parse the input expressions (again, in the case -4898 that an input expression is a SQL string). -4899 -4900 Returns: -4901 Select: the syntax tree for the SELECT statement. -4902 """ -4903 return Select().select(*expressions, dialect=dialect, **opts) -4904 +4884 return Union(this=left, expression=right, distinct=distinct) +4885 +4886 +4887def intersect( +4888 left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts +4889) -> Intersect: +4890 """ +4891 Initializes a syntax tree from one INTERSECT expression. +4892 +4893 Example: +4894 >>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql() +4895 'SELECT * FROM foo INTERSECT SELECT * FROM bla' +4896 +4897 Args: +4898 left: the SQL code string corresponding to the left-hand side. +4899 If an `Expression` instance is passed, it will be used as-is. +4900 right: the SQL code string corresponding to the right-hand side. +4901 If an `Expression` instance is passed, it will be used as-is. +4902 distinct: set the DISTINCT flag if and only if this is true. +4903 dialect: the dialect used to parse the input expression. +4904 opts: other options to use to parse the input expressions. 4905 -4906def from_(expression: ExpOrStr, dialect: DialectType = None, **opts) -> Select: -4907 """ -4908 Initializes a syntax tree from a FROM expression. -4909 -4910 Example: -4911 >>> from_("tbl").select("col1", "col2").sql() -4912 'SELECT col1, col2 FROM tbl' +4906 Returns: +4907 The new Intersect instance. +4908 """ +4909 left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts) +4910 right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts) +4911 +4912 return Intersect(this=left, expression=right, distinct=distinct) 4913 -4914 Args: -4915 *expression: the SQL code string to parse as the FROM expressions of a -4916 SELECT statement. If an Expression instance is passed, this is used as-is. -4917 dialect: the dialect used to parse the input expression (in the case that the -4918 input expression is a SQL string). -4919 **opts: other options to use to parse the input expressions (again, in the case -4920 that the input expression is a SQL string). -4921 -4922 Returns: -4923 Select: the syntax tree for the SELECT statement. 
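A brief usage sketch of the set-operation builders documented above, grounded in their doctests (table names are illustrative):

    from sqlglot import exp

    print(exp.union("SELECT * FROM foo", "SELECT * FROM bla").sql())
    # SELECT * FROM foo UNION SELECT * FROM bla

    # distinct=False switches the operator to its ALL form.
    print(exp.union("SELECT * FROM foo", "SELECT * FROM bla", distinct=False).sql())
    # SELECT * FROM foo UNION ALL SELECT * FROM bla

    print(exp.intersect("SELECT a FROM x", "SELECT a FROM y").sql())
    # SELECT a FROM x INTERSECT SELECT a FROM y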
-4924 """ -4925 return Select().from_(expression, dialect=dialect, **opts) -4926 -4927 -4928def update( -4929 table: str | Table, -4930 properties: dict, -4931 where: t.Optional[ExpOrStr] = None, -4932 from_: t.Optional[ExpOrStr] = None, -4933 dialect: DialectType = None, -4934 **opts, -4935) -> Update: -4936 """ -4937 Creates an update statement. -4938 -4939 Example: -4940 >>> update("my_table", {"x": 1, "y": "2", "z": None}, from_="baz", where="id > 1").sql() -4941 "UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1" +4914 +4915def except_( +4916 left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts +4917) -> Except: +4918 """ +4919 Initializes a syntax tree from one EXCEPT expression. +4920 +4921 Example: +4922 >>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql() +4923 'SELECT * FROM foo EXCEPT SELECT * FROM bla' +4924 +4925 Args: +4926 left: the SQL code string corresponding to the left-hand side. +4927 If an `Expression` instance is passed, it will be used as-is. +4928 right: the SQL code string corresponding to the right-hand side. +4929 If an `Expression` instance is passed, it will be used as-is. +4930 distinct: set the DISTINCT flag if and only if this is true. +4931 dialect: the dialect used to parse the input expression. +4932 opts: other options to use to parse the input expressions. +4933 +4934 Returns: +4935 The new Except instance. +4936 """ +4937 left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts) +4938 right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts) +4939 +4940 return Except(this=left, expression=right, distinct=distinct) +4941 4942 -4943 Args: -4944 *properties: dictionary of properties to set which are -4945 auto converted to sql objects eg None -> NULL -4946 where: sql conditional parsed into a WHERE statement -4947 from_: sql statement parsed into a FROM statement -4948 dialect: the dialect used to parse the input expressions. -4949 **opts: other options to use to parse the input expressions. +4943def select(*expressions: ExpOrStr, dialect: DialectType = None, **opts) -> Select: +4944 """ +4945 Initializes a syntax tree from one or multiple SELECT expressions. +4946 +4947 Example: +4948 >>> select("col1", "col2").from_("tbl").sql() +4949 'SELECT col1, col2 FROM tbl' 4950 -4951 Returns: -4952 Update: the syntax tree for the UPDATE statement. -4953 """ -4954 update_expr = Update(this=maybe_parse(table, into=Table, dialect=dialect)) -4955 update_expr.set( -4956 "expressions", -4957 [ -4958 EQ(this=maybe_parse(k, dialect=dialect, **opts), expression=convert(v)) -4959 for k, v in properties.items() -4960 ], -4961 ) -4962 if from_: -4963 update_expr.set( -4964 "from", -4965 maybe_parse(from_, into=From, dialect=dialect, prefix="FROM", **opts), -4966 ) -4967 if isinstance(where, Condition): -4968 where = Where(this=where) -4969 if where: -4970 update_expr.set( -4971 "where", -4972 maybe_parse(where, into=Where, dialect=dialect, prefix="WHERE", **opts), -4973 ) -4974 return update_expr -4975 -4976 -4977def delete( -4978 table: ExpOrStr, -4979 where: t.Optional[ExpOrStr] = None, -4980 returning: t.Optional[ExpOrStr] = None, -4981 dialect: DialectType = None, -4982 **opts, -4983) -> Delete: -4984 """ -4985 Builds a delete statement. +4951 Args: +4952 *expressions: the SQL code string to parse as the expressions of a +4953 SELECT statement. If an Expression instance is passed, this is used as-is. 
+4954 dialect: the dialect used to parse the input expressions (in the case that an +4955 input expression is a SQL string). +4956 **opts: other options to use to parse the input expressions (again, in the case +4957 that an input expression is a SQL string). +4958 +4959 Returns: +4960 Select: the syntax tree for the SELECT statement. +4961 """ +4962 return Select().select(*expressions, dialect=dialect, **opts) +4963 +4964 +4965def from_(expression: ExpOrStr, dialect: DialectType = None, **opts) -> Select: +4966 """ +4967 Initializes a syntax tree from a FROM expression. +4968 +4969 Example: +4970 >>> from_("tbl").select("col1", "col2").sql() +4971 'SELECT col1, col2 FROM tbl' +4972 +4973 Args: +4974 *expression: the SQL code string to parse as the FROM expressions of a +4975 SELECT statement. If an Expression instance is passed, this is used as-is. +4976 dialect: the dialect used to parse the input expression (in the case that the +4977 input expression is a SQL string). +4978 **opts: other options to use to parse the input expressions (again, in the case +4979 that the input expression is a SQL string). +4980 +4981 Returns: +4982 Select: the syntax tree for the SELECT statement. +4983 """ +4984 return Select().from_(expression, dialect=dialect, **opts) +4985 4986 -4987 Example: -4988 >>> delete("my_table", where="id > 1").sql() -4989 'DELETE FROM my_table WHERE id > 1' -4990 -4991 Args: -4992 where: sql conditional parsed into a WHERE statement -4993 returning: sql conditional parsed into a RETURNING statement -4994 dialect: the dialect used to parse the input expressions. -4995 **opts: other options to use to parse the input expressions. -4996 -4997 Returns: -4998 Delete: the syntax tree for the DELETE statement. -4999 """ -5000 delete_expr = Delete().delete(table, dialect=dialect, copy=False, **opts) -5001 if where: -5002 delete_expr = delete_expr.where(where, dialect=dialect, copy=False, **opts) -5003 if returning: -5004 delete_expr = delete_expr.returning(returning, dialect=dialect, copy=False, **opts) -5005 return delete_expr -5006 -5007 -5008def insert( -5009 expression: ExpOrStr, -5010 into: ExpOrStr, -5011 columns: t.Optional[t.Sequence[ExpOrStr]] = None, -5012 overwrite: t.Optional[bool] = None, -5013 dialect: DialectType = None, -5014 copy: bool = True, -5015 **opts, -5016) -> Insert: -5017 """ -5018 Builds an INSERT statement. -5019 -5020 Example: -5021 >>> insert("VALUES (1, 2, 3)", "tbl").sql() -5022 'INSERT INTO tbl VALUES (1, 2, 3)' -5023 -5024 Args: -5025 expression: the sql string or expression of the INSERT statement -5026 into: the tbl to insert data to. -5027 columns: optionally the table's column names. -5028 overwrite: whether to INSERT OVERWRITE or not. -5029 dialect: the dialect used to parse the input expressions. -5030 copy: whether or not to copy the expression. -5031 **opts: other options to use to parse the input expressions. -5032 -5033 Returns: -5034 Insert: the syntax tree for the INSERT statement. 
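A small sketch of the select()/from_() entry points shown above; the .where()/.limit() chaining is the standard Select API and the identifiers are illustrative:

    from sqlglot import exp

    # The two entry points are equivalent ways to start building a SELECT.
    print(exp.select("col1", "col2").from_("tbl").sql())   # SELECT col1, col2 FROM tbl
    print(exp.from_("tbl").select("col1", "col2").sql())   # SELECT col1, col2 FROM tbl

    # Further clauses chain onto the same Select node.
    print(exp.select("col1").from_("tbl").where("col1 > 10").limit(5).sql())
    # SELECT col1 FROM tbl WHERE col1 > 10 LIMIT 5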
-5035 """ -5036 expr = maybe_parse(expression, dialect=dialect, copy=copy, **opts) -5037 this: Table | Schema = maybe_parse(into, into=Table, dialect=dialect, copy=copy, **opts) -5038 -5039 if columns: -5040 this = _apply_list_builder( -5041 *columns, -5042 instance=Schema(this=this), -5043 arg="expressions", -5044 into=Identifier, -5045 copy=False, -5046 dialect=dialect, -5047 **opts, -5048 ) +4987def update( +4988 table: str | Table, +4989 properties: dict, +4990 where: t.Optional[ExpOrStr] = None, +4991 from_: t.Optional[ExpOrStr] = None, +4992 dialect: DialectType = None, +4993 **opts, +4994) -> Update: +4995 """ +4996 Creates an update statement. +4997 +4998 Example: +4999 >>> update("my_table", {"x": 1, "y": "2", "z": None}, from_="baz", where="id > 1").sql() +5000 "UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1" +5001 +5002 Args: +5003 *properties: dictionary of properties to set which are +5004 auto converted to sql objects eg None -> NULL +5005 where: sql conditional parsed into a WHERE statement +5006 from_: sql statement parsed into a FROM statement +5007 dialect: the dialect used to parse the input expressions. +5008 **opts: other options to use to parse the input expressions. +5009 +5010 Returns: +5011 Update: the syntax tree for the UPDATE statement. +5012 """ +5013 update_expr = Update(this=maybe_parse(table, into=Table, dialect=dialect)) +5014 update_expr.set( +5015 "expressions", +5016 [ +5017 EQ(this=maybe_parse(k, dialect=dialect, **opts), expression=convert(v)) +5018 for k, v in properties.items() +5019 ], +5020 ) +5021 if from_: +5022 update_expr.set( +5023 "from", +5024 maybe_parse(from_, into=From, dialect=dialect, prefix="FROM", **opts), +5025 ) +5026 if isinstance(where, Condition): +5027 where = Where(this=where) +5028 if where: +5029 update_expr.set( +5030 "where", +5031 maybe_parse(where, into=Where, dialect=dialect, prefix="WHERE", **opts), +5032 ) +5033 return update_expr +5034 +5035 +5036def delete( +5037 table: ExpOrStr, +5038 where: t.Optional[ExpOrStr] = None, +5039 returning: t.Optional[ExpOrStr] = None, +5040 dialect: DialectType = None, +5041 **opts, +5042) -> Delete: +5043 """ +5044 Builds a delete statement. +5045 +5046 Example: +5047 >>> delete("my_table", where="id > 1").sql() +5048 'DELETE FROM my_table WHERE id > 1' 5049 -5050 return Insert(this=this, expression=expr, overwrite=overwrite) -5051 -5052 -5053def condition( -5054 expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts -5055) -> Condition: -5056 """ -5057 Initialize a logical condition expression. -5058 -5059 Example: -5060 >>> condition("x=1").sql() -5061 'x = 1' -5062 -5063 This is helpful for composing larger logical syntax trees: -5064 >>> where = condition("x=1") -5065 >>> where = where.and_("y=1") -5066 >>> Select().from_("tbl").select("*").where(where).sql() -5067 'SELECT * FROM tbl WHERE x = 1 AND y = 1' -5068 -5069 Args: -5070 *expression: the SQL code string to parse. -5071 If an Expression instance is passed, this is used as-is. -5072 dialect: the dialect used to parse the input expression (in the case that the -5073 input expression is a SQL string). -5074 copy: Whether or not to copy `expression` (only applies to expressions). -5075 **opts: other options to use to parse the input expressions (again, in the case -5076 that the input expression is a SQL string). 
-5077 -5078 Returns: -5079 The new Condition instance -5080 """ -5081 return maybe_parse( -5082 expression, -5083 into=Condition, -5084 dialect=dialect, -5085 copy=copy, -5086 **opts, -5087 ) -5088 -5089 -5090def and_( -5091 *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts -5092) -> Condition: -5093 """ -5094 Combine multiple conditions with an AND logical operator. -5095 -5096 Example: -5097 >>> and_("x=1", and_("y=1", "z=1")).sql() -5098 'x = 1 AND (y = 1 AND z = 1)' -5099 -5100 Args: -5101 *expressions: the SQL code strings to parse. -5102 If an Expression instance is passed, this is used as-is. -5103 dialect: the dialect used to parse the input expression. -5104 copy: whether or not to copy `expressions` (only applies to Expressions). -5105 **opts: other options to use to parse the input expressions. -5106 -5107 Returns: -5108 And: the new condition -5109 """ -5110 return t.cast(Condition, _combine(expressions, And, dialect, copy=copy, **opts)) +5050 Args: +5051 where: sql conditional parsed into a WHERE statement +5052 returning: sql conditional parsed into a RETURNING statement +5053 dialect: the dialect used to parse the input expressions. +5054 **opts: other options to use to parse the input expressions. +5055 +5056 Returns: +5057 Delete: the syntax tree for the DELETE statement. +5058 """ +5059 delete_expr = Delete().delete(table, dialect=dialect, copy=False, **opts) +5060 if where: +5061 delete_expr = delete_expr.where(where, dialect=dialect, copy=False, **opts) +5062 if returning: +5063 delete_expr = delete_expr.returning(returning, dialect=dialect, copy=False, **opts) +5064 return delete_expr +5065 +5066 +5067def insert( +5068 expression: ExpOrStr, +5069 into: ExpOrStr, +5070 columns: t.Optional[t.Sequence[ExpOrStr]] = None, +5071 overwrite: t.Optional[bool] = None, +5072 dialect: DialectType = None, +5073 copy: bool = True, +5074 **opts, +5075) -> Insert: +5076 """ +5077 Builds an INSERT statement. +5078 +5079 Example: +5080 >>> insert("VALUES (1, 2, 3)", "tbl").sql() +5081 'INSERT INTO tbl VALUES (1, 2, 3)' +5082 +5083 Args: +5084 expression: the sql string or expression of the INSERT statement +5085 into: the tbl to insert data to. +5086 columns: optionally the table's column names. +5087 overwrite: whether to INSERT OVERWRITE or not. +5088 dialect: the dialect used to parse the input expressions. +5089 copy: whether or not to copy the expression. +5090 **opts: other options to use to parse the input expressions. +5091 +5092 Returns: +5093 Insert: the syntax tree for the INSERT statement. +5094 """ +5095 expr = maybe_parse(expression, dialect=dialect, copy=copy, **opts) +5096 this: Table | Schema = maybe_parse(into, into=Table, dialect=dialect, copy=copy, **opts) +5097 +5098 if columns: +5099 this = _apply_list_builder( +5100 *columns, +5101 instance=Schema(this=this), +5102 arg="expressions", +5103 into=Identifier, +5104 copy=False, +5105 dialect=dialect, +5106 **opts, +5107 ) +5108 +5109 return Insert(this=this, expression=expr, overwrite=overwrite) +5110 5111 -5112 -5113def or_( -5114 *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts -5115) -> Condition: -5116 """ -5117 Combine multiple conditions with an OR logical operator. -5118 -5119 Example: -5120 >>> or_("x=1", or_("y=1", "z=1")).sql() -5121 'x = 1 OR (y = 1 OR z = 1)' -5122 -5123 Args: -5124 *expressions: the SQL code strings to parse. -5125 If an Expression instance is passed, this is used as-is. 
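A short sketch of the delete() and insert() builders above (the first two calls mirror the doctests; the columns variant and its rendered output are an assumption based on the Schema handling in insert()):

    from sqlglot import exp

    print(exp.delete("my_table", where="id > 1").sql())
    # DELETE FROM my_table WHERE id > 1

    # insert() accepts either a VALUES clause or a full SELECT as the source expression.
    print(exp.insert("VALUES (1, 2, 3)", "tbl").sql())
    # INSERT INTO tbl VALUES (1, 2, 3)

    print(exp.insert("SELECT * FROM staging", "tbl", columns=["a", "b", "c"]).sql())
    # INSERT INTO tbl (a, b, c) SELECT * FROM staging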
-5126 dialect: the dialect used to parse the input expression. -5127 copy: whether or not to copy `expressions` (only applies to Expressions). -5128 **opts: other options to use to parse the input expressions. -5129 -5130 Returns: -5131 Or: the new condition -5132 """ -5133 return t.cast(Condition, _combine(expressions, Or, dialect, copy=copy, **opts)) -5134 -5135 -5136def not_(expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts) -> Not: -5137 """ -5138 Wrap a condition with a NOT operator. -5139 -5140 Example: -5141 >>> not_("this_suit='black'").sql() -5142 "NOT this_suit = 'black'" -5143 -5144 Args: -5145 expression: the SQL code string to parse. -5146 If an Expression instance is passed, this is used as-is. -5147 dialect: the dialect used to parse the input expression. -5148 copy: whether to copy the expression or not. -5149 **opts: other options to use to parse the input expressions. -5150 -5151 Returns: -5152 The new condition. -5153 """ -5154 this = condition( -5155 expression, -5156 dialect=dialect, -5157 copy=copy, -5158 **opts, -5159 ) -5160 return Not(this=_wrap(this, Connector)) -5161 -5162 -5163def paren(expression: ExpOrStr, copy: bool = True) -> Paren: -5164 """ -5165 Wrap an expression in parentheses. -5166 -5167 Example: -5168 >>> paren("5 + 3").sql() -5169 '(5 + 3)' +5112def condition( +5113 expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts +5114) -> Condition: +5115 """ +5116 Initialize a logical condition expression. +5117 +5118 Example: +5119 >>> condition("x=1").sql() +5120 'x = 1' +5121 +5122 This is helpful for composing larger logical syntax trees: +5123 >>> where = condition("x=1") +5124 >>> where = where.and_("y=1") +5125 >>> Select().from_("tbl").select("*").where(where).sql() +5126 'SELECT * FROM tbl WHERE x = 1 AND y = 1' +5127 +5128 Args: +5129 *expression: the SQL code string to parse. +5130 If an Expression instance is passed, this is used as-is. +5131 dialect: the dialect used to parse the input expression (in the case that the +5132 input expression is a SQL string). +5133 copy: Whether or not to copy `expression` (only applies to expressions). +5134 **opts: other options to use to parse the input expressions (again, in the case +5135 that the input expression is a SQL string). +5136 +5137 Returns: +5138 The new Condition instance +5139 """ +5140 return maybe_parse( +5141 expression, +5142 into=Condition, +5143 dialect=dialect, +5144 copy=copy, +5145 **opts, +5146 ) +5147 +5148 +5149def and_( +5150 *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts +5151) -> Condition: +5152 """ +5153 Combine multiple conditions with an AND logical operator. +5154 +5155 Example: +5156 >>> and_("x=1", and_("y=1", "z=1")).sql() +5157 'x = 1 AND (y = 1 AND z = 1)' +5158 +5159 Args: +5160 *expressions: the SQL code strings to parse. +5161 If an Expression instance is passed, this is used as-is. +5162 dialect: the dialect used to parse the input expression. +5163 copy: whether or not to copy `expressions` (only applies to Expressions). +5164 **opts: other options to use to parse the input expressions. +5165 +5166 Returns: +5167 And: the new condition +5168 """ +5169 return t.cast(Condition, _combine(expressions, And, dialect, copy=copy, **opts)) 5170 -5171 Args: -5172 expression: the SQL code string to parse. -5173 If an Expression instance is passed, this is used as-is. -5174 copy: whether to copy the expression or not. -5175 -5176 Returns: -5177 The wrapped expression. 
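A sketch of composing conditions with condition()/and_()/or_(), grounded in the doctests above (identifiers are illustrative):

    from sqlglot import exp

    where = exp.condition("x = 1").and_("y = 1")
    print(exp.select("*").from_("tbl").where(where).sql())
    # SELECT * FROM tbl WHERE x = 1 AND y = 1

    # Nested connectors are parenthesized so operator precedence is preserved.
    print(exp.and_("x = 1", exp.or_("y = 1", "z = 1")).sql())
    # x = 1 AND (y = 1 OR z = 1)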
-5178 """ -5179 return Paren(this=maybe_parse(expression, copy=copy)) -5180 +5171 +5172def or_( +5173 *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts +5174) -> Condition: +5175 """ +5176 Combine multiple conditions with an OR logical operator. +5177 +5178 Example: +5179 >>> or_("x=1", or_("y=1", "z=1")).sql() +5180 'x = 1 OR (y = 1 OR z = 1)' 5181 -5182SAFE_IDENTIFIER_RE = re.compile(r"^[_a-zA-Z][\w]*$") -5183 -5184 -5185@t.overload -5186def to_identifier(name: None, quoted: t.Optional[bool] = None, copy: bool = True) -> None: -5187 ... +5182 Args: +5183 *expressions: the SQL code strings to parse. +5184 If an Expression instance is passed, this is used as-is. +5185 dialect: the dialect used to parse the input expression. +5186 copy: whether or not to copy `expressions` (only applies to Expressions). +5187 **opts: other options to use to parse the input expressions. 5188 -5189 -5190@t.overload -5191def to_identifier( -5192 name: str | Identifier, quoted: t.Optional[bool] = None, copy: bool = True -5193) -> Identifier: -5194 ... -5195 -5196 -5197def to_identifier(name, quoted=None, copy=True): -5198 """Builds an identifier. -5199 -5200 Args: -5201 name: The name to turn into an identifier. -5202 quoted: Whether or not force quote the identifier. -5203 copy: Whether or not to copy a passed in Identefier node. -5204 -5205 Returns: -5206 The identifier ast node. -5207 """ -5208 -5209 if name is None: -5210 return None -5211 -5212 if isinstance(name, Identifier): -5213 identifier = _maybe_copy(name, copy) -5214 elif isinstance(name, str): -5215 identifier = Identifier( -5216 this=name, -5217 quoted=not SAFE_IDENTIFIER_RE.match(name) if quoted is None else quoted, -5218 ) -5219 else: -5220 raise ValueError(f"Name needs to be a string or an Identifier, got: {name.__class__}") -5221 return identifier -5222 -5223 -5224INTERVAL_STRING_RE = re.compile(r"\s*([0-9]+)\s*([a-zA-Z]+)\s*") +5189 Returns: +5190 Or: the new condition +5191 """ +5192 return t.cast(Condition, _combine(expressions, Or, dialect, copy=copy, **opts)) +5193 +5194 +5195def not_(expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts) -> Not: +5196 """ +5197 Wrap a condition with a NOT operator. +5198 +5199 Example: +5200 >>> not_("this_suit='black'").sql() +5201 "NOT this_suit = 'black'" +5202 +5203 Args: +5204 expression: the SQL code string to parse. +5205 If an Expression instance is passed, this is used as-is. +5206 dialect: the dialect used to parse the input expression. +5207 copy: whether to copy the expression or not. +5208 **opts: other options to use to parse the input expressions. +5209 +5210 Returns: +5211 The new condition. +5212 """ +5213 this = condition( +5214 expression, +5215 dialect=dialect, +5216 copy=copy, +5217 **opts, +5218 ) +5219 return Not(this=_wrap(this, Connector)) +5220 +5221 +5222def paren(expression: ExpOrStr, copy: bool = True) -> Paren: +5223 """ +5224 Wrap an expression in parentheses. 5225 -5226 -5227def to_interval(interval: str | Literal) -> Interval: -5228 """Builds an interval expression from a string like '1 day' or '5 months'.""" -5229 if isinstance(interval, Literal): -5230 if not interval.is_string: -5231 raise ValueError("Invalid interval string.") -5232 -5233 interval = interval.this +5226 Example: +5227 >>> paren("5 + 3").sql() +5228 '(5 + 3)' +5229 +5230 Args: +5231 expression: the SQL code string to parse. +5232 If an Expression instance is passed, this is used as-is. +5233 copy: whether to copy the expression or not. 
5234 -5235 interval_parts = INTERVAL_STRING_RE.match(interval) # type: ignore -5236 -5237 if not interval_parts: -5238 raise ValueError("Invalid interval string.") +5235 Returns: +5236 The wrapped expression. +5237 """ +5238 return Paren(this=maybe_parse(expression, copy=copy)) 5239 -5240 return Interval( -5241 this=Literal.string(interval_parts.group(1)), -5242 unit=Var(this=interval_parts.group(2)), -5243 ) -5244 -5245 -5246@t.overload -5247def to_table(sql_path: str | Table, **kwargs) -> Table: -5248 ... -5249 -5250 -5251@t.overload -5252def to_table(sql_path: None, **kwargs) -> None: +5240 +5241SAFE_IDENTIFIER_RE = re.compile(r"^[_a-zA-Z][\w]*$") +5242 +5243 +5244@t.overload +5245def to_identifier(name: None, quoted: t.Optional[bool] = None, copy: bool = True) -> None: +5246 ... +5247 +5248 +5249@t.overload +5250def to_identifier( +5251 name: str | Identifier, quoted: t.Optional[bool] = None, copy: bool = True +5252) -> Identifier: 5253 ... 5254 5255 -5256def to_table( -5257 sql_path: t.Optional[str | Table], dialect: DialectType = None, **kwargs -5258) -> t.Optional[Table]: -5259 """ -5260 Create a table expression from a `[catalog].[schema].[table]` sql path. Catalog and schema are optional. -5261 If a table is passed in then that table is returned. -5262 -5263 Args: -5264 sql_path: a `[catalog].[schema].[table]` string. -5265 dialect: the source dialect according to which the table name will be parsed. -5266 kwargs: the kwargs to instantiate the resulting `Table` expression with. +5256def to_identifier(name, quoted=None, copy=True): +5257 """Builds an identifier. +5258 +5259 Args: +5260 name: The name to turn into an identifier. +5261 quoted: Whether or not force quote the identifier. +5262 copy: Whether or not to copy a passed in Identefier node. +5263 +5264 Returns: +5265 The identifier ast node. +5266 """ 5267 -5268 Returns: -5269 A table expression. -5270 """ -5271 if sql_path is None or isinstance(sql_path, Table): -5272 return sql_path -5273 if not isinstance(sql_path, str): -5274 raise ValueError(f"Invalid type provided for a table: {type(sql_path)}") -5275 -5276 table = maybe_parse(sql_path, into=Table, dialect=dialect) -5277 if table: -5278 for k, v in kwargs.items(): -5279 table.set(k, v) -5280 -5281 return table +5268 if name is None: +5269 return None +5270 +5271 if isinstance(name, Identifier): +5272 identifier = _maybe_copy(name, copy) +5273 elif isinstance(name, str): +5274 identifier = Identifier( +5275 this=name, +5276 quoted=not SAFE_IDENTIFIER_RE.match(name) if quoted is None else quoted, +5277 ) +5278 else: +5279 raise ValueError(f"Name needs to be a string or an Identifier, got: {name.__class__}") +5280 return identifier +5281 5282 -5283 -5284def to_column(sql_path: str | Column, **kwargs) -> Column: -5285 """ -5286 Create a column from a `[table].[column]` sql path. Schema is optional. -5287 -5288 If a column is passed in then that column is returned. 
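A small sketch of the low-level helpers above (illustrative names; the rendered INTERVAL string may vary slightly by version):

    from sqlglot import exp

    # Identifiers matching SAFE_IDENTIFIER_RE stay unquoted; anything else is quoted.
    print(exp.to_identifier("col_1").sql())                 # col_1
    print(exp.to_identifier("weird col").sql())             # "weird col"
    print(exp.to_identifier("col_1", quoted=True).sql())    # "col_1"

    # to_interval accepts strings like "1 day" or "5 months".
    print(exp.to_interval("5 months").sql())                # e.g. INTERVAL '5' months

    # to_table builds a Table from a dotted [catalog].[schema].[table] path.
    print(exp.to_table("catalog.db.tbl").sql())             # catalog.db.tbl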
-5289 -5290 Args: -5291 sql_path: `[table].[column]` string -5292 Returns: -5293 Table: A column expression -5294 """ -5295 if sql_path is None or isinstance(sql_path, Column): -5296 return sql_path -5297 if not isinstance(sql_path, str): -5298 raise ValueError(f"Invalid type provided for column: {type(sql_path)}") -5299 return column(*reversed(sql_path.split(".")), **kwargs) # type: ignore -5300 -5301 -5302def alias_( -5303 expression: ExpOrStr, -5304 alias: str | Identifier, -5305 table: bool | t.Sequence[str | Identifier] = False, -5306 quoted: t.Optional[bool] = None, -5307 dialect: DialectType = None, -5308 copy: bool = True, -5309 **opts, -5310): -5311 """Create an Alias expression. -5312 -5313 Example: -5314 >>> alias_('foo', 'bar').sql() -5315 'foo AS bar' -5316 -5317 >>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql() -5318 '(SELECT 1, 2) AS bar(a, b)' -5319 -5320 Args: -5321 expression: the SQL code strings to parse. -5322 If an Expression instance is passed, this is used as-is. -5323 alias: the alias name to use. If the name has -5324 special characters it is quoted. -5325 table: Whether or not to create a table alias, can also be a list of columns. -5326 quoted: whether or not to quote the alias -5327 dialect: the dialect used to parse the input expression. -5328 copy: Whether or not to copy the expression. -5329 **opts: other options to use to parse the input expressions. -5330 -5331 Returns: -5332 Alias: the aliased expression -5333 """ -5334 exp = maybe_parse(expression, dialect=dialect, copy=copy, **opts) -5335 alias = to_identifier(alias, quoted=quoted) -5336 -5337 if table: -5338 table_alias = TableAlias(this=alias) -5339 exp.set("alias", table_alias) -5340 -5341 if not isinstance(table, bool): -5342 for column in table: -5343 table_alias.append("columns", to_identifier(column, quoted=quoted)) -5344 -5345 return exp +5283INTERVAL_STRING_RE = re.compile(r"\s*([0-9]+)\s*([a-zA-Z]+)\s*") +5284 +5285 +5286def to_interval(interval: str | Literal) -> Interval: +5287 """Builds an interval expression from a string like '1 day' or '5 months'.""" +5288 if isinstance(interval, Literal): +5289 if not interval.is_string: +5290 raise ValueError("Invalid interval string.") +5291 +5292 interval = interval.this +5293 +5294 interval_parts = INTERVAL_STRING_RE.match(interval) # type: ignore +5295 +5296 if not interval_parts: +5297 raise ValueError("Invalid interval string.") +5298 +5299 return Interval( +5300 this=Literal.string(interval_parts.group(1)), +5301 unit=Var(this=interval_parts.group(2)), +5302 ) +5303 +5304 +5305@t.overload +5306def to_table(sql_path: str | Table, **kwargs) -> Table: +5307 ... +5308 +5309 +5310@t.overload +5311def to_table(sql_path: None, **kwargs) -> None: +5312 ... +5313 +5314 +5315def to_table( +5316 sql_path: t.Optional[str | Table], dialect: DialectType = None, **kwargs +5317) -> t.Optional[Table]: +5318 """ +5319 Create a table expression from a `[catalog].[schema].[table]` sql path. Catalog and schema are optional. +5320 If a table is passed in then that table is returned. +5321 +5322 Args: +5323 sql_path: a `[catalog].[schema].[table]` string. +5324 dialect: the source dialect according to which the table name will be parsed. +5325 kwargs: the kwargs to instantiate the resulting `Table` expression with. +5326 +5327 Returns: +5328 A table expression. 
+5329 """ +5330 if sql_path is None or isinstance(sql_path, Table): +5331 return sql_path +5332 if not isinstance(sql_path, str): +5333 raise ValueError(f"Invalid type provided for a table: {type(sql_path)}") +5334 +5335 table = maybe_parse(sql_path, into=Table, dialect=dialect) +5336 if table: +5337 for k, v in kwargs.items(): +5338 table.set(k, v) +5339 +5340 return table +5341 +5342 +5343def to_column(sql_path: str | Column, **kwargs) -> Column: +5344 """ +5345 Create a column from a `[table].[column]` sql path. Schema is optional. 5346 -5347 # We don't set the "alias" arg for Window expressions, because that would add an IDENTIFIER node in -5348 # the AST, representing a "named_window" [1] construct (eg. bigquery). What we want is an ALIAS node -5349 # for the complete Window expression. -5350 # -5351 # [1]: https://cloud.google.com/bigquery/docs/reference/standard-sql/window-function-calls -5352 -5353 if "alias" in exp.arg_types and not isinstance(exp, Window): -5354 exp.set("alias", alias) -5355 return exp -5356 return Alias(this=exp, alias=alias) -5357 -5358 -5359def subquery( -5360 expression: ExpOrStr, -5361 alias: t.Optional[Identifier | str] = None, -5362 dialect: DialectType = None, -5363 **opts, -5364) -> Select: -5365 """ -5366 Build a subquery expression. -5367 -5368 Example: -5369 >>> subquery('select x from tbl', 'bar').select('x').sql() -5370 'SELECT x FROM (SELECT x FROM tbl) AS bar' +5347 If a column is passed in then that column is returned. +5348 +5349 Args: +5350 sql_path: `[table].[column]` string +5351 Returns: +5352 Table: A column expression +5353 """ +5354 if sql_path is None or isinstance(sql_path, Column): +5355 return sql_path +5356 if not isinstance(sql_path, str): +5357 raise ValueError(f"Invalid type provided for column: {type(sql_path)}") +5358 return column(*reversed(sql_path.split(".")), **kwargs) # type: ignore +5359 +5360 +5361def alias_( +5362 expression: ExpOrStr, +5363 alias: str | Identifier, +5364 table: bool | t.Sequence[str | Identifier] = False, +5365 quoted: t.Optional[bool] = None, +5366 dialect: DialectType = None, +5367 copy: bool = True, +5368 **opts, +5369): +5370 """Create an Alias expression. 5371 -5372 Args: -5373 expression: the SQL code strings to parse. -5374 If an Expression instance is passed, this is used as-is. -5375 alias: the alias name to use. -5376 dialect: the dialect used to parse the input expression. -5377 **opts: other options to use to parse the input expressions. +5372 Example: +5373 >>> alias_('foo', 'bar').sql() +5374 'foo AS bar' +5375 +5376 >>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql() +5377 '(SELECT 1, 2) AS bar(a, b)' 5378 -5379 Returns: -5380 A new Select instance with the subquery expression included. -5381 """ -5382 -5383 expression = maybe_parse(expression, dialect=dialect, **opts).subquery(alias) -5384 return Select().from_(expression, dialect=dialect, **opts) -5385 -5386 -5387def column( -5388 col: str | Identifier, -5389 table: t.Optional[str | Identifier] = None, -5390 db: t.Optional[str | Identifier] = None, -5391 catalog: t.Optional[str | Identifier] = None, -5392 quoted: t.Optional[bool] = None, -5393) -> Column: -5394 """ -5395 Build a Column. -5396 -5397 Args: -5398 col: Column name. -5399 table: Table name. -5400 db: Database name. -5401 catalog: Catalog name. -5402 quoted: Whether to force quotes on the column's identifiers. +5379 Args: +5380 expression: the SQL code strings to parse. +5381 If an Expression instance is passed, this is used as-is. +5382 alias: the alias name to use. 
If the name has +5383 special characters it is quoted. +5384 table: Whether or not to create a table alias, can also be a list of columns. +5385 quoted: whether or not to quote the alias +5386 dialect: the dialect used to parse the input expression. +5387 copy: Whether or not to copy the expression. +5388 **opts: other options to use to parse the input expressions. +5389 +5390 Returns: +5391 Alias: the aliased expression +5392 """ +5393 exp = maybe_parse(expression, dialect=dialect, copy=copy, **opts) +5394 alias = to_identifier(alias, quoted=quoted) +5395 +5396 if table: +5397 table_alias = TableAlias(this=alias) +5398 exp.set("alias", table_alias) +5399 +5400 if not isinstance(table, bool): +5401 for column in table: +5402 table_alias.append("columns", to_identifier(column, quoted=quoted)) 5403 -5404 Returns: -5405 The new Column instance. -5406 """ -5407 return Column( -5408 this=to_identifier(col, quoted=quoted), -5409 table=to_identifier(table, quoted=quoted), -5410 db=to_identifier(db, quoted=quoted), -5411 catalog=to_identifier(catalog, quoted=quoted), -5412 ) -5413 -5414 -5415def cast(expression: ExpOrStr, to: str | DataType | DataType.Type, **opts) -> Cast: -5416 """Cast an expression to a data type. +5404 return exp +5405 +5406 # We don't set the "alias" arg for Window expressions, because that would add an IDENTIFIER node in +5407 # the AST, representing a "named_window" [1] construct (eg. bigquery). What we want is an ALIAS node +5408 # for the complete Window expression. +5409 # +5410 # [1]: https://cloud.google.com/bigquery/docs/reference/standard-sql/window-function-calls +5411 +5412 if "alias" in exp.arg_types and not isinstance(exp, Window): +5413 exp.set("alias", alias) +5414 return exp +5415 return Alias(this=exp, alias=alias) +5416 5417 -5418 Example: -5419 >>> cast('x + 1', 'int').sql() -5420 'CAST(x + 1 AS INT)' -5421 -5422 Args: -5423 expression: The expression to cast. -5424 to: The datatype to cast to. -5425 -5426 Returns: -5427 The new Cast instance. -5428 """ -5429 expression = maybe_parse(expression, **opts) -5430 return Cast(this=expression, to=DataType.build(to, **opts)) -5431 -5432 -5433def table_( -5434 table: Identifier | str, -5435 db: t.Optional[Identifier | str] = None, -5436 catalog: t.Optional[Identifier | str] = None, -5437 quoted: t.Optional[bool] = None, -5438 alias: t.Optional[Identifier | str] = None, -5439) -> Table: -5440 """Build a Table. +5418def subquery( +5419 expression: ExpOrStr, +5420 alias: t.Optional[Identifier | str] = None, +5421 dialect: DialectType = None, +5422 **opts, +5423) -> Select: +5424 """ +5425 Build a subquery expression. +5426 +5427 Example: +5428 >>> subquery('select x from tbl', 'bar').select('x').sql() +5429 'SELECT x FROM (SELECT x FROM tbl) AS bar' +5430 +5431 Args: +5432 expression: the SQL code strings to parse. +5433 If an Expression instance is passed, this is used as-is. +5434 alias: the alias name to use. +5435 dialect: the dialect used to parse the input expression. +5436 **opts: other options to use to parse the input expressions. +5437 +5438 Returns: +5439 A new Select instance with the subquery expression included. +5440 """ 5441 -5442 Args: -5443 table: Table name. -5444 db: Database name. -5445 catalog: Catalog name. -5446 quote: Whether to force quotes on the table's identifiers. -5447 alias: Table's alias. -5448 -5449 Returns: -5450 The new Table instance. 
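A usage sketch of the aliasing and node-building helpers above; the alias_/subquery calls mirror the doctests, while the cast-of-a-column example and its output are an assumption based on column() and DataType.build:

    from sqlglot import exp

    print(exp.alias_("foo", "bar").sql())                   # foo AS bar
    print(exp.alias_("(select 1, 2)", "bar", table=["a", "b"]).sql())
    # (SELECT 1, 2) AS bar(a, b)

    print(exp.subquery("select x from tbl", "bar").select("x").sql())
    # SELECT x FROM (SELECT x FROM tbl) AS bar

    # column() and cast() build nodes directly, without parsing SQL strings.
    c = exp.column("price", table="items")
    print(exp.cast(c, "decimal(10, 2)").sql())              # CAST(items.price AS DECIMAL(10, 2))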
-5451 """ -5452 return Table( -5453 this=to_identifier(table, quoted=quoted), -5454 db=to_identifier(db, quoted=quoted), -5455 catalog=to_identifier(catalog, quoted=quoted), -5456 alias=TableAlias(this=to_identifier(alias)) if alias else None, -5457 ) -5458 -5459 -5460def values( -5461 values: t.Iterable[t.Tuple[t.Any, ...]], -5462 alias: t.Optional[str] = None, -5463 columns: t.Optional[t.Iterable[str] | t.Dict[str, DataType]] = None, -5464) -> Values: -5465 """Build VALUES statement. -5466 -5467 Example: -5468 >>> values([(1, '2')]).sql() -5469 "VALUES (1, '2')" -5470 -5471 Args: -5472 values: values statements that will be converted to SQL -5473 alias: optional alias -5474 columns: Optional list of ordered column names or ordered dictionary of column names to types. -5475 If either are provided then an alias is also required. +5442 expression = maybe_parse(expression, dialect=dialect, **opts).subquery(alias) +5443 return Select().from_(expression, dialect=dialect, **opts) +5444 +5445 +5446def column( +5447 col: str | Identifier, +5448 table: t.Optional[str | Identifier] = None, +5449 db: t.Optional[str | Identifier] = None, +5450 catalog: t.Optional[str | Identifier] = None, +5451 quoted: t.Optional[bool] = None, +5452) -> Column: +5453 """ +5454 Build a Column. +5455 +5456 Args: +5457 col: Column name. +5458 table: Table name. +5459 db: Database name. +5460 catalog: Catalog name. +5461 quoted: Whether to force quotes on the column's identifiers. +5462 +5463 Returns: +5464 The new Column instance. +5465 """ +5466 return Column( +5467 this=to_identifier(col, quoted=quoted), +5468 table=to_identifier(table, quoted=quoted), +5469 db=to_identifier(db, quoted=quoted), +5470 catalog=to_identifier(catalog, quoted=quoted), +5471 ) +5472 +5473 +5474def cast(expression: ExpOrStr, to: str | DataType | DataType.Type, **opts) -> Cast: +5475 """Cast an expression to a data type. 5476 -5477 Returns: -5478 Values: the Values expression object -5479 """ -5480 if columns and not alias: -5481 raise ValueError("Alias is required when providing columns") -5482 -5483 return Values( -5484 expressions=[convert(tup) for tup in values], -5485 alias=( -5486 TableAlias(this=to_identifier(alias), columns=[to_identifier(x) for x in columns]) -5487 if columns -5488 else (TableAlias(this=to_identifier(alias)) if alias else None) -5489 ), -5490 ) +5477 Example: +5478 >>> cast('x + 1', 'int').sql() +5479 'CAST(x + 1 AS INT)' +5480 +5481 Args: +5482 expression: The expression to cast. +5483 to: The datatype to cast to. +5484 +5485 Returns: +5486 The new Cast instance. +5487 """ +5488 expression = maybe_parse(expression, **opts) +5489 return Cast(this=expression, to=DataType.build(to, **opts)) +5490 5491 -5492 -5493def var(name: t.Optional[ExpOrStr]) -> Var: -5494 """Build a SQL variable. -5495 -5496 Example: -5497 >>> repr(var('x')) -5498 '(VAR this: x)' -5499 -5500 >>> repr(var(column('x', table='y'))) -5501 '(VAR this: x)' -5502 -5503 Args: -5504 name: The name of the var or an expression who's name will become the var. -5505 -5506 Returns: -5507 The new variable node. -5508 """ -5509 if not name: -5510 raise ValueError("Cannot convert empty name into var.") -5511 -5512 if isinstance(name, Expression): -5513 name = name.name -5514 return Var(this=name) -5515 -5516 -5517def rename_table(old_name: str | Table, new_name: str | Table) -> AlterTable: -5518 """Build ALTER TABLE... RENAME... 
expression -5519 -5520 Args: -5521 old_name: The old name of the table -5522 new_name: The new name of the table -5523 -5524 Returns: -5525 Alter table expression -5526 """ -5527 old_table = to_table(old_name) -5528 new_table = to_table(new_name) -5529 return AlterTable( -5530 this=old_table, -5531 actions=[ -5532 RenameTable(this=new_table), -5533 ], -5534 ) +5492def table_( +5493 table: Identifier | str, +5494 db: t.Optional[Identifier | str] = None, +5495 catalog: t.Optional[Identifier | str] = None, +5496 quoted: t.Optional[bool] = None, +5497 alias: t.Optional[Identifier | str] = None, +5498) -> Table: +5499 """Build a Table. +5500 +5501 Args: +5502 table: Table name. +5503 db: Database name. +5504 catalog: Catalog name. +5505 quote: Whether to force quotes on the table's identifiers. +5506 alias: Table's alias. +5507 +5508 Returns: +5509 The new Table instance. +5510 """ +5511 return Table( +5512 this=to_identifier(table, quoted=quoted), +5513 db=to_identifier(db, quoted=quoted), +5514 catalog=to_identifier(catalog, quoted=quoted), +5515 alias=TableAlias(this=to_identifier(alias)) if alias else None, +5516 ) +5517 +5518 +5519def values( +5520 values: t.Iterable[t.Tuple[t.Any, ...]], +5521 alias: t.Optional[str] = None, +5522 columns: t.Optional[t.Iterable[str] | t.Dict[str, DataType]] = None, +5523) -> Values: +5524 """Build VALUES statement. +5525 +5526 Example: +5527 >>> values([(1, '2')]).sql() +5528 "VALUES (1, '2')" +5529 +5530 Args: +5531 values: values statements that will be converted to SQL +5532 alias: optional alias +5533 columns: Optional list of ordered column names or ordered dictionary of column names to types. +5534 If either are provided then an alias is also required. 5535 -5536 -5537def convert(value: t.Any, copy: bool = False) -> Expression: -5538 """Convert a python value into an expression object. -5539 -5540 Raises an error if a conversion is not possible. +5536 Returns: +5537 Values: the Values expression object +5538 """ +5539 if columns and not alias: +5540 raise ValueError("Alias is required when providing columns") 5541 -5542 Args: -5543 value: A python object. -5544 copy: Whether or not to copy `value` (only applies to Expressions and collections). -5545 -5546 Returns: -5547 Expression: the equivalent expression object. 
-5548 """ -5549 if isinstance(value, Expression): -5550 return _maybe_copy(value, copy) -5551 if isinstance(value, str): -5552 return Literal.string(value) -5553 if isinstance(value, bool): -5554 return Boolean(this=value) -5555 if value is None or (isinstance(value, float) and math.isnan(value)): -5556 return NULL -5557 if isinstance(value, numbers.Number): -5558 return Literal.number(value) -5559 if isinstance(value, datetime.datetime): -5560 datetime_literal = Literal.string( -5561 (value if value.tzinfo else value.replace(tzinfo=datetime.timezone.utc)).isoformat() -5562 ) -5563 return TimeStrToTime(this=datetime_literal) -5564 if isinstance(value, datetime.date): -5565 date_literal = Literal.string(value.strftime("%Y-%m-%d")) -5566 return DateStrToDate(this=date_literal) -5567 if isinstance(value, tuple): -5568 return Tuple(expressions=[convert(v, copy=copy) for v in value]) -5569 if isinstance(value, list): -5570 return Array(expressions=[convert(v, copy=copy) for v in value]) -5571 if isinstance(value, dict): -5572 return Map( -5573 keys=[convert(k, copy=copy) for k in value], -5574 values=[convert(v, copy=copy) for v in value.values()], -5575 ) -5576 raise ValueError(f"Cannot convert {value}") -5577 +5542 return Values( +5543 expressions=[convert(tup) for tup in values], +5544 alias=( +5545 TableAlias(this=to_identifier(alias), columns=[to_identifier(x) for x in columns]) +5546 if columns +5547 else (TableAlias(this=to_identifier(alias)) if alias else None) +5548 ), +5549 ) +5550 +5551 +5552def var(name: t.Optional[ExpOrStr]) -> Var: +5553 """Build a SQL variable. +5554 +5555 Example: +5556 >>> repr(var('x')) +5557 '(VAR this: x)' +5558 +5559 >>> repr(var(column('x', table='y'))) +5560 '(VAR this: x)' +5561 +5562 Args: +5563 name: The name of the var or an expression who's name will become the var. +5564 +5565 Returns: +5566 The new variable node. +5567 """ +5568 if not name: +5569 raise ValueError("Cannot convert empty name into var.") +5570 +5571 if isinstance(name, Expression): +5572 name = name.name +5573 return Var(this=name) +5574 +5575 +5576def rename_table(old_name: str | Table, new_name: str | Table) -> AlterTable: +5577 """Build ALTER TABLE... RENAME... expression 5578 -5579def replace_children(expression: Expression, fun: t.Callable, *args, **kwargs) -> None: -5580 """ -5581 Replace children of an expression with the result of a lambda fun(child) -> exp. -5582 """ -5583 for k, v in expression.args.items(): -5584 is_list_arg = type(v) is list -5585 -5586 child_nodes = v if is_list_arg else [v] -5587 new_child_nodes = [] -5588 -5589 for cn in child_nodes: -5590 if isinstance(cn, Expression): -5591 for child_node in ensure_collection(fun(cn, *args, **kwargs)): -5592 new_child_nodes.append(child_node) -5593 child_node.parent = expression -5594 child_node.arg_key = k -5595 else: -5596 new_child_nodes.append(cn) -5597 -5598 expression.args[k] = new_child_nodes if is_list_arg else seq_get(new_child_nodes, 0) -5599 +5579 Args: +5580 old_name: The old name of the table +5581 new_name: The new name of the table +5582 +5583 Returns: +5584 Alter table expression +5585 """ +5586 old_table = to_table(old_name) +5587 new_table = to_table(new_name) +5588 return AlterTable( +5589 this=old_table, +5590 actions=[ +5591 RenameTable(this=new_table), +5592 ], +5593 ) +5594 +5595 +5596def convert(value: t.Any, copy: bool = False) -> Expression: +5597 """Convert a python value into an expression object. +5598 +5599 Raises an error if a conversion is not possible. 
5600 -5601def column_table_names(expression: Expression) -> t.List[str]: -5602 """ -5603 Return all table names referenced through columns in an expression. +5601 Args: +5602 value: A python object. +5603 copy: Whether or not to copy `value` (only applies to Expressions and collections). 5604 -5605 Example: -5606 >>> import sqlglot -5607 >>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e")) -5608 ['c', 'a'] -5609 -5610 Args: -5611 expression: expression to find table names. -5612 -5613 Returns: -5614 A list of unique names. -5615 """ -5616 return list(dict.fromkeys(column.table for column in expression.find_all(Column))) -5617 -5618 -5619def table_name(table: Table | str) -> str: -5620 """Get the full name of a table as a string. -5621 -5622 Args: -5623 table: table expression node or string. -5624 -5625 Examples: -5626 >>> from sqlglot import exp, parse_one -5627 >>> table_name(parse_one("select * from a.b.c").find(exp.Table)) -5628 'a.b.c' -5629 -5630 Returns: -5631 The table name. -5632 """ -5633 -5634 table = maybe_parse(table, into=Table) -5635 -5636 if not table: -5637 raise ValueError(f"Cannot parse {table}") -5638 -5639 return ".".join(part for part in (table.text("catalog"), table.text("db"), table.name) if part) -5640 -5641 -5642def replace_tables(expression: E, mapping: t.Dict[str, str]) -> E: -5643 """Replace all tables in expression according to the mapping. +5605 Returns: +5606 Expression: the equivalent expression object. +5607 """ +5608 if isinstance(value, Expression): +5609 return _maybe_copy(value, copy) +5610 if isinstance(value, str): +5611 return Literal.string(value) +5612 if isinstance(value, bool): +5613 return Boolean(this=value) +5614 if value is None or (isinstance(value, float) and math.isnan(value)): +5615 return NULL +5616 if isinstance(value, numbers.Number): +5617 return Literal.number(value) +5618 if isinstance(value, datetime.datetime): +5619 datetime_literal = Literal.string( +5620 (value if value.tzinfo else value.replace(tzinfo=datetime.timezone.utc)).isoformat() +5621 ) +5622 return TimeStrToTime(this=datetime_literal) +5623 if isinstance(value, datetime.date): +5624 date_literal = Literal.string(value.strftime("%Y-%m-%d")) +5625 return DateStrToDate(this=date_literal) +5626 if isinstance(value, tuple): +5627 return Tuple(expressions=[convert(v, copy=copy) for v in value]) +5628 if isinstance(value, list): +5629 return Array(expressions=[convert(v, copy=copy) for v in value]) +5630 if isinstance(value, dict): +5631 return Map( +5632 keys=[convert(k, copy=copy) for k in value], +5633 values=[convert(v, copy=copy) for v in value.values()], +5634 ) +5635 raise ValueError(f"Cannot convert {value}") +5636 +5637 +5638def replace_children(expression: Expression, fun: t.Callable, *args, **kwargs) -> None: +5639 """ +5640 Replace children of an expression with the result of a lambda fun(child) -> exp. +5641 """ +5642 for k, v in expression.args.items(): +5643 is_list_arg = type(v) is list 5644 -5645 Args: -5646 expression: expression node to be transformed and replaced. -5647 mapping: mapping of table names. -5648 -5649 Examples: -5650 >>> from sqlglot import exp, parse_one -5651 >>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql() -5652 'SELECT * FROM c' -5653 -5654 Returns: -5655 The mapped expression. 
-5656 """ -5657 -5658 def _replace_tables(node: Expression) -> Expression: -5659 if isinstance(node, Table): -5660 new_name = mapping.get(table_name(node)) -5661 if new_name: -5662 return to_table( -5663 new_name, -5664 **{k: v for k, v in node.args.items() if k not in ("this", "db", "catalog")}, -5665 ) -5666 return node -5667 -5668 return expression.transform(_replace_tables) -5669 -5670 -5671def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression: -5672 """Replace placeholders in an expression. -5673 -5674 Args: -5675 expression: expression node to be transformed and replaced. -5676 args: positional names that will substitute unnamed placeholders in the given order. -5677 kwargs: keyword arguments that will substitute named placeholders. -5678 -5679 Examples: -5680 >>> from sqlglot import exp, parse_one -5681 >>> replace_placeholders( -5682 ... parse_one("select * from :tbl where ? = ?"), -5683 ... exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo") -5684 ... ).sql() -5685 "SELECT * FROM foo WHERE str_col = 'b'" -5686 -5687 Returns: -5688 The mapped expression. -5689 """ -5690 -5691 def _replace_placeholders(node: Expression, args, **kwargs) -> Expression: -5692 if isinstance(node, Placeholder): -5693 if node.name: -5694 new_name = kwargs.get(node.name) -5695 if new_name: -5696 return convert(new_name) -5697 else: -5698 try: -5699 return convert(next(args)) -5700 except StopIteration: -5701 pass -5702 return node +5645 child_nodes = v if is_list_arg else [v] +5646 new_child_nodes = [] +5647 +5648 for cn in child_nodes: +5649 if isinstance(cn, Expression): +5650 for child_node in ensure_collection(fun(cn, *args, **kwargs)): +5651 new_child_nodes.append(child_node) +5652 child_node.parent = expression +5653 child_node.arg_key = k +5654 else: +5655 new_child_nodes.append(cn) +5656 +5657 expression.args[k] = new_child_nodes if is_list_arg else seq_get(new_child_nodes, 0) +5658 +5659 +5660def column_table_names(expression: Expression) -> t.List[str]: +5661 """ +5662 Return all table names referenced through columns in an expression. +5663 +5664 Example: +5665 >>> import sqlglot +5666 >>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e")) +5667 ['c', 'a'] +5668 +5669 Args: +5670 expression: expression to find table names. +5671 +5672 Returns: +5673 A list of unique names. +5674 """ +5675 return list(dict.fromkeys(column.table for column in expression.find_all(Column))) +5676 +5677 +5678def table_name(table: Table | str) -> str: +5679 """Get the full name of a table as a string. +5680 +5681 Args: +5682 table: table expression node or string. +5683 +5684 Examples: +5685 >>> from sqlglot import exp, parse_one +5686 >>> table_name(parse_one("select * from a.b.c").find(exp.Table)) +5687 'a.b.c' +5688 +5689 Returns: +5690 The table name. +5691 """ +5692 +5693 table = maybe_parse(table, into=Table) +5694 +5695 if not table: +5696 raise ValueError(f"Cannot parse {table}") +5697 +5698 return ".".join(part for part in (table.text("catalog"), table.text("db"), table.name) if part) +5699 +5700 +5701def replace_tables(expression: E, mapping: t.Dict[str, str], copy: bool = True) -> E: +5702 """Replace all tables in expression according to the mapping. 5703 -5704 return expression.transform(_replace_placeholders, iter(args), **kwargs) -5705 -5706 -5707def expand( -5708 expression: Expression, sources: t.Dict[str, Subqueryable], copy: bool = True -5709) -> Expression: -5710 """Transforms an expression by expanding all referenced sources into subqueries. 
-5711 -5712 Examples: -5713 >>> from sqlglot import parse_one -5714 >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql() -5715 'SELECT * FROM (SELECT * FROM y) AS z /* source: x */' -5716 -5717 >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql() -5718 'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */' -5719 -5720 Args: -5721 expression: The expression to expand. -5722 sources: A dictionary of name to Subqueryables. -5723 copy: Whether or not to copy the expression during transformation. Defaults to True. -5724 -5725 Returns: -5726 The transformed expression. -5727 """ -5728 -5729 def _expand(node: Expression): -5730 if isinstance(node, Table): -5731 name = table_name(node) -5732 source = sources.get(name) -5733 if source: -5734 subquery = source.subquery(node.alias or name) -5735 subquery.comments = [f"source: {name}"] -5736 return subquery.transform(_expand, copy=False) -5737 return node +5704 Args: +5705 expression: expression node to be transformed and replaced. +5706 mapping: mapping of table names. +5707 copy: whether or not to copy the expression. +5708 +5709 Examples: +5710 >>> from sqlglot import exp, parse_one +5711 >>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql() +5712 'SELECT * FROM c' +5713 +5714 Returns: +5715 The mapped expression. +5716 """ +5717 +5718 def _replace_tables(node: Expression) -> Expression: +5719 if isinstance(node, Table): +5720 new_name = mapping.get(table_name(node)) +5721 if new_name: +5722 return to_table( +5723 new_name, +5724 **{k: v for k, v in node.args.items() if k not in ("this", "db", "catalog")}, +5725 ) +5726 return node +5727 +5728 return expression.transform(_replace_tables, copy=copy) +5729 +5730 +5731def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression: +5732 """Replace placeholders in an expression. +5733 +5734 Args: +5735 expression: expression node to be transformed and replaced. +5736 args: positional names that will substitute unnamed placeholders in the given order. +5737 kwargs: keyword arguments that will substitute named placeholders. 5738 -5739 return expression.transform(_expand, copy=copy) -5740 -5741 -5742def func(name: str, *args, dialect: DialectType = None, **kwargs) -> Func: -5743 """ -5744 Returns a Func expression. -5745 -5746 Examples: -5747 >>> func("abs", 5).sql() -5748 'ABS(5)' -5749 -5750 >>> func("cast", this=5, to=DataType.build("DOUBLE")).sql() -5751 'CAST(5 AS DOUBLE)' -5752 -5753 Args: -5754 name: the name of the function to build. -5755 args: the args used to instantiate the function of interest. -5756 dialect: the source dialect. -5757 kwargs: the kwargs used to instantiate the function of interest. -5758 -5759 Note: -5760 The arguments `args` and `kwargs` are mutually exclusive. -5761 -5762 Returns: -5763 An instance of the function of interest, or an anonymous function, if `name` doesn't -5764 correspond to an existing `sqlglot.expressions.Func` class. 
-5765 """ -5766 if args and kwargs: -5767 raise ValueError("Can't use both args and kwargs to instantiate a function.") -5768 -5769 from sqlglot.dialects.dialect import Dialect -5770 -5771 converted: t.List[Expression] = [maybe_parse(arg, dialect=dialect) for arg in args] -5772 kwargs = {key: maybe_parse(value, dialect=dialect) for key, value in kwargs.items()} -5773 -5774 parser = Dialect.get_or_raise(dialect)().parser() -5775 from_args_list = parser.FUNCTIONS.get(name.upper()) +5739 Examples: +5740 >>> from sqlglot import exp, parse_one +5741 >>> replace_placeholders( +5742 ... parse_one("select * from :tbl where ? = ?"), +5743 ... exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo") +5744 ... ).sql() +5745 "SELECT * FROM foo WHERE str_col = 'b'" +5746 +5747 Returns: +5748 The mapped expression. +5749 """ +5750 +5751 def _replace_placeholders(node: Expression, args, **kwargs) -> Expression: +5752 if isinstance(node, Placeholder): +5753 if node.name: +5754 new_name = kwargs.get(node.name) +5755 if new_name: +5756 return convert(new_name) +5757 else: +5758 try: +5759 return convert(next(args)) +5760 except StopIteration: +5761 pass +5762 return node +5763 +5764 return expression.transform(_replace_placeholders, iter(args), **kwargs) +5765 +5766 +5767def expand( +5768 expression: Expression, sources: t.Dict[str, Subqueryable], copy: bool = True +5769) -> Expression: +5770 """Transforms an expression by expanding all referenced sources into subqueries. +5771 +5772 Examples: +5773 >>> from sqlglot import parse_one +5774 >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql() +5775 'SELECT * FROM (SELECT * FROM y) AS z /* source: x */' 5776 -5777 if from_args_list: -5778 function = from_args_list(converted) if converted else from_args_list.__self__(**kwargs) # type: ignore -5779 else: -5780 kwargs = kwargs or {"expressions": converted} -5781 function = Anonymous(this=name, **kwargs) -5782 -5783 for error_message in function.error_messages(converted): -5784 raise ValueError(error_message) -5785 -5786 return function -5787 +5777 >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql() +5778 'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */' +5779 +5780 Args: +5781 expression: The expression to expand. +5782 sources: A dictionary of name to Subqueryables. +5783 copy: Whether or not to copy the expression during transformation. Defaults to True. +5784 +5785 Returns: +5786 The transformed expression. +5787 """ 5788 -5789def true() -> Boolean: -5790 """ -5791 Returns a true Boolean expression. -5792 """ -5793 return Boolean(this=True) -5794 -5795 -5796def false() -> Boolean: -5797 """ -5798 Returns a false Boolean expression. -5799 """ -5800 return Boolean(this=False) +5789 def _expand(node: Expression): +5790 if isinstance(node, Table): +5791 name = table_name(node) +5792 source = sources.get(name) +5793 if source: +5794 subquery = source.subquery(node.alias or name) +5795 subquery.comments = [f"source: {name}"] +5796 return subquery.transform(_expand, copy=False) +5797 return node +5798 +5799 return expression.transform(_expand, copy=copy) +5800 5801 -5802 -5803def null() -> Null: -5804 """ -5805 Returns a Null expression. -5806 """ -5807 return Null() -5808 +5802def func(name: str, *args, dialect: DialectType = None, **kwargs) -> Func: +5803 """ +5804 Returns a Func expression. 
+5805 +5806 Examples: +5807 >>> func("abs", 5).sql() +5808 'ABS(5)' 5809 -5810# TODO: deprecate this -5811TRUE = Boolean(this=True) -5812FALSE = Boolean(this=False) -5813NULL = Null() +5810 >>> func("cast", this=5, to=DataType.build("DOUBLE")).sql() +5811 'CAST(5 AS DOUBLE)' +5812 +5813 Args: +5814 name: the name of the function to build. +5815 args: the args used to instantiate the function of interest. +5816 dialect: the source dialect. +5817 kwargs: the kwargs used to instantiate the function of interest. +5818 +5819 Note: +5820 The arguments `args` and `kwargs` are mutually exclusive. +5821 +5822 Returns: +5823 An instance of the function of interest, or an anonymous function, if `name` doesn't +5824 correspond to an existing `sqlglot.expressions.Func` class. +5825 """ +5826 if args and kwargs: +5827 raise ValueError("Can't use both args and kwargs to instantiate a function.") +5828 +5829 from sqlglot.dialects.dialect import Dialect +5830 +5831 converted: t.List[Expression] = [maybe_parse(arg, dialect=dialect) for arg in args] +5832 kwargs = {key: maybe_parse(value, dialect=dialect) for key, value in kwargs.items()} +5833 +5834 parser = Dialect.get_or_raise(dialect)().parser() +5835 from_args_list = parser.FUNCTIONS.get(name.upper()) +5836 +5837 if from_args_list: +5838 function = from_args_list(converted) if converted else from_args_list.__self__(**kwargs) # type: ignore +5839 else: +5840 kwargs = kwargs or {"expressions": converted} +5841 function = Anonymous(this=name, **kwargs) +5842 +5843 for error_message in function.error_messages(converted): +5844 raise ValueError(error_message) +5845 +5846 return function +5847 +5848 +5849def true() -> Boolean: +5850 """ +5851 Returns a true Boolean expression. +5852 """ +5853 return Boolean(this=True) +5854 +5855 +5856def false() -> Boolean: +5857 """ +5858 Returns a false Boolean expression. +5859 """ +5860 return Boolean(this=False) +5861 +5862 +5863def null() -> Null: +5864 """ +5865 Returns a Null expression. +5866 """ +5867 return Null() +5868 +5869 +5870# TODO: deprecate this +5871TRUE = Boolean(this=True) +5872FALSE = Boolean(this=False) +5873NULL = Null()
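The builder helpers documented in this block (column, cast, alias_, table_, values, replace_tables, replace_placeholders, func, and friends) compose sqlglot ASTs without hand-writing SQL strings. A minimal usage sketch, assuming the sqlglot 16.x API shown above; the schema names and the table mapping are illustrative, not taken from the patch:

import sqlglot
from sqlglot import exp

# Compose nodes directly: column -> cast -> alias.
node = exp.alias_(exp.cast(exp.column("price", table="items"), "DOUBLE"), "price_usd")
print(node.sql())  # CAST(items.price AS DOUBLE) AS price_usd

# Rewrite table references according to a mapping (copies the tree by default).
tree = sqlglot.parse_one("SELECT * FROM a.b JOIN a.c ON b.id = c.id")
print(exp.replace_tables(tree, {"a.b": "staging.b"}).sql())

# Substitute positional and named placeholders (docstring example above).
print(
    exp.replace_placeholders(
        sqlglot.parse_one("select * from :tbl where ? = ?"),
        exp.to_identifier("str_col"),
        "b",
        tbl=exp.to_identifier("foo"),
    ).sql()
)  # SELECT * FROM foo WHERE str_col = 'b'

# func() resolves known function names and falls back to an anonymous call.
print(exp.func("abs", 5).sql())  # ABS(5)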
@@ -16911,13 +17016,14 @@ If an Expression instance is passed, it w
1501    arg_types = {
1502        "this": False,
1503        "table": False,
-1504        "where": False,
-1505        "columns": False,
-1506        "unique": False,
-1507        "primary": False,
-1508        "amp": False,  # teradata
-1509        "partition_by": False,  # teradata
-1510    }
+1504        "using": False,
+1505        "where": False,
+1506        "columns": False,
+1507        "unique": False,
+1508        "primary": False,
+1509        "amp": False,  # teradata
+1510        "partition_by": False,  # teradata
+1511    }
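This hunk adds a "using" slot to Delete. A tentative sketch, assuming (it is not shown in this patch) that the 16.2.1 parser and generator round-trip PostgreSQL's DELETE ... USING form into the new slot:

import sqlglot

sql = "DELETE FROM a USING b WHERE a.id = b.id"
expr = sqlglot.parse_one(sql, read="postgres")
print(expr.args.get("using"))        # expected to hold the USING table(s) if supported
print(expr.sql(dialect="postgres"))  # expected to round-trip the statement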
    @@ -16981,54 +17087,54 @@ If an Expression instance is passed, it w
    -
    1513class Insert(Expression):
    -1514    arg_types = {
    -1515        "with": False,
    -1516        "this": True,
    -1517        "expression": False,
    -1518        "conflict": False,
    -1519        "returning": False,
    -1520        "overwrite": False,
    -1521        "exists": False,
    -1522        "partition": False,
    -1523        "alternative": False,
    -1524    }
    -1525
    -1526    def with_(
    -1527        self,
    -1528        alias: ExpOrStr,
    -1529        as_: ExpOrStr,
    -1530        recursive: t.Optional[bool] = None,
    -1531        append: bool = True,
    -1532        dialect: DialectType = None,
    -1533        copy: bool = True,
    -1534        **opts,
    -1535    ) -> Insert:
    -1536        """
    -1537        Append to or set the common table expressions.
    -1538
    -1539        Example:
    -1540            >>> insert("SELECT x FROM cte", "t").with_("cte", as_="SELECT * FROM tbl").sql()
    -1541            'WITH cte AS (SELECT * FROM tbl) INSERT INTO t SELECT x FROM cte'
    -1542
    -1543        Args:
    -1544            alias: the SQL code string to parse as the table name.
    -1545                If an `Expression` instance is passed, this is used as-is.
    -1546            as_: the SQL code string to parse as the table expression.
    -1547                If an `Expression` instance is passed, it will be used as-is.
    -1548            recursive: set the RECURSIVE part of the expression. Defaults to `False`.
    -1549            append: if `True`, add to any existing expressions.
    -1550                Otherwise, this resets the expressions.
    -1551            dialect: the dialect used to parse the input expression.
    -1552            copy: if `False`, modify this expression instance in-place.
    -1553            opts: other options to use to parse the input expressions.
    -1554
    -1555        Returns:
    -1556            The modified expression.
    -1557        """
    -1558        return _apply_cte_builder(
    -1559            self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts
    -1560        )
    +            
    1514class Insert(Expression):
    +1515    arg_types = {
    +1516        "with": False,
    +1517        "this": True,
    +1518        "expression": False,
    +1519        "conflict": False,
    +1520        "returning": False,
    +1521        "overwrite": False,
    +1522        "exists": False,
    +1523        "partition": False,
    +1524        "alternative": False,
    +1525    }
    +1526
    +1527    def with_(
    +1528        self,
    +1529        alias: ExpOrStr,
    +1530        as_: ExpOrStr,
    +1531        recursive: t.Optional[bool] = None,
    +1532        append: bool = True,
    +1533        dialect: DialectType = None,
    +1534        copy: bool = True,
    +1535        **opts,
    +1536    ) -> Insert:
    +1537        """
    +1538        Append to or set the common table expressions.
    +1539
    +1540        Example:
    +1541            >>> insert("SELECT x FROM cte", "t").with_("cte", as_="SELECT * FROM tbl").sql()
    +1542            'WITH cte AS (SELECT * FROM tbl) INSERT INTO t SELECT x FROM cte'
    +1543
    +1544        Args:
    +1545            alias: the SQL code string to parse as the table name.
    +1546                If an `Expression` instance is passed, this is used as-is.
    +1547            as_: the SQL code string to parse as the table expression.
    +1548                If an `Expression` instance is passed, it will be used as-is.
    +1549            recursive: set the RECURSIVE part of the expression. Defaults to `False`.
    +1550            append: if `True`, add to any existing expressions.
    +1551                Otherwise, this resets the expressions.
    +1552            dialect: the dialect used to parse the input expression.
    +1553            copy: if `False`, modify this expression instance in-place.
    +1554            opts: other options to use to parse the input expressions.
    +1555
    +1556        Returns:
    +1557            The modified expression.
    +1558        """
    +1559        return _apply_cte_builder(
    +1560            self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts
    +1561        )
     
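Insert.with_ mirrors the Select builder for prepending common table expressions to an INSERT. A short sketch based on the docstring example in this hunk, assuming exp.insert is the module-level builder the example relies on:

from sqlglot import exp

stmt = exp.insert("SELECT x FROM cte", "t").with_("cte", as_="SELECT * FROM tbl")
print(stmt.sql())
# WITH cte AS (SELECT * FROM tbl) INSERT INTO t SELECT x FROM cte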
    @@ -17045,41 +17151,41 @@ If an Expression instance is passed, it w
    -
    1526    def with_(
    -1527        self,
    -1528        alias: ExpOrStr,
    -1529        as_: ExpOrStr,
    -1530        recursive: t.Optional[bool] = None,
    -1531        append: bool = True,
    -1532        dialect: DialectType = None,
    -1533        copy: bool = True,
    -1534        **opts,
    -1535    ) -> Insert:
    -1536        """
    -1537        Append to or set the common table expressions.
    -1538
    -1539        Example:
    -1540            >>> insert("SELECT x FROM cte", "t").with_("cte", as_="SELECT * FROM tbl").sql()
    -1541            'WITH cte AS (SELECT * FROM tbl) INSERT INTO t SELECT x FROM cte'
    -1542
    -1543        Args:
    -1544            alias: the SQL code string to parse as the table name.
    -1545                If an `Expression` instance is passed, this is used as-is.
    -1546            as_: the SQL code string to parse as the table expression.
    -1547                If an `Expression` instance is passed, it will be used as-is.
    -1548            recursive: set the RECURSIVE part of the expression. Defaults to `False`.
    -1549            append: if `True`, add to any existing expressions.
    -1550                Otherwise, this resets the expressions.
    -1551            dialect: the dialect used to parse the input expression.
    -1552            copy: if `False`, modify this expression instance in-place.
    -1553            opts: other options to use to parse the input expressions.
    -1554
    -1555        Returns:
    -1556            The modified expression.
    -1557        """
    -1558        return _apply_cte_builder(
    -1559            self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts
    -1560        )
    +            
    1527    def with_(
    +1528        self,
    +1529        alias: ExpOrStr,
    +1530        as_: ExpOrStr,
    +1531        recursive: t.Optional[bool] = None,
    +1532        append: bool = True,
    +1533        dialect: DialectType = None,
    +1534        copy: bool = True,
    +1535        **opts,
    +1536    ) -> Insert:
    +1537        """
    +1538        Append to or set the common table expressions.
    +1539
    +1540        Example:
    +1541            >>> insert("SELECT x FROM cte", "t").with_("cte", as_="SELECT * FROM tbl").sql()
    +1542            'WITH cte AS (SELECT * FROM tbl) INSERT INTO t SELECT x FROM cte'
    +1543
    +1544        Args:
    +1545            alias: the SQL code string to parse as the table name.
    +1546                If an `Expression` instance is passed, this is used as-is.
    +1547            as_: the SQL code string to parse as the table expression.
    +1548                If an `Expression` instance is passed, it will be used as-is.
    +1549            recursive: set the RECURSIVE part of the expression. Defaults to `False`.
    +1550            append: if `True`, add to any existing expressions.
    +1551                Otherwise, this resets the expressions.
    +1552            dialect: the dialect used to parse the input expression.
    +1553            copy: if `False`, modify this expression instance in-place.
    +1554            opts: other options to use to parse the input expressions.
    +1555
    +1556        Returns:
    +1557            The modified expression.
    +1558        """
    +1559        return _apply_cte_builder(
    +1560            self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts
    +1561        )
     
    @@ -17177,14 +17283,14 @@ Otherwise, this resets the expressions.
    -
    1563class OnConflict(Expression):
    -1564    arg_types = {
    -1565        "duplicate": False,
    -1566        "expressions": False,
    -1567        "nothing": False,
    -1568        "key": False,
    -1569        "constraint": False,
    -1570    }
    +            
    1564class OnConflict(Expression):
    +1565    arg_types = {
    +1566        "duplicate": False,
    +1567        "expressions": False,
    +1568        "nothing": False,
    +1569        "key": False,
    +1570        "constraint": False,
    +1571    }
     
    @@ -17248,8 +17354,8 @@ Otherwise, this resets the expressions.
    -
    1573class Returning(Expression):
    -1574    arg_types = {"expressions": True}
    +            
    1574class Returning(Expression):
    +1575    arg_types = {"expressions": True}
     
    @@ -17313,8 +17419,8 @@ Otherwise, this resets the expressions.
    -
    1578class Introducer(Expression):
    -1579    arg_types = {"this": True, "expression": True}
    +            
    1579class Introducer(Expression):
    +1580    arg_types = {"this": True, "expression": True}
     
    @@ -17378,8 +17484,8 @@ Otherwise, this resets the expressions.
    -
    1583class National(Expression):
    -1584    pass
    +            
    1584class National(Expression):
    +1585    pass
     
    @@ -17443,16 +17549,16 @@ Otherwise, this resets the expressions.
    -
    1587class LoadData(Expression):
    -1588    arg_types = {
    -1589        "this": True,
    -1590        "local": False,
    -1591        "overwrite": False,
    -1592        "inpath": True,
    -1593        "partition": False,
    -1594        "input_format": False,
    -1595        "serde": False,
    -1596    }
    +            
    1588class LoadData(Expression):
    +1589    arg_types = {
    +1590        "this": True,
    +1591        "local": False,
    +1592        "overwrite": False,
    +1593        "inpath": True,
    +1594        "partition": False,
    +1595        "input_format": False,
    +1596        "serde": False,
    +1597    }
     
    @@ -17516,8 +17622,8 @@ Otherwise, this resets the expressions.
    -
    1599class Partition(Expression):
    -1600    arg_types = {"expressions": True}
    +            
    1600class Partition(Expression):
    +1601    arg_types = {"expressions": True}
     
    @@ -17581,13 +17687,13 @@ Otherwise, this resets the expressions.
    -
    1603class Fetch(Expression):
    -1604    arg_types = {
    -1605        "direction": False,
    -1606        "count": False,
    -1607        "percent": False,
    -1608        "with_ties": False,
    -1609    }
    +            
    1604class Fetch(Expression):
    +1605    arg_types = {
    +1606        "direction": False,
    +1607        "count": False,
    +1608        "percent": False,
    +1609        "with_ties": False,
    +1610    }
     
    @@ -17651,14 +17757,14 @@ Otherwise, this resets the expressions.
    -
    1612class Group(Expression):
    -1613    arg_types = {
    -1614        "expressions": False,
    -1615        "grouping_sets": False,
    -1616        "cube": False,
    -1617        "rollup": False,
    -1618        "totals": False,
    -1619    }
    +            
    1613class Group(Expression):
    +1614    arg_types = {
    +1615        "expressions": False,
    +1616        "grouping_sets": False,
    +1617        "cube": False,
    +1618        "rollup": False,
    +1619        "totals": False,
    +1620    }
     
    @@ -17722,8 +17828,8 @@ Otherwise, this resets the expressions.
    -
    1622class Lambda(Expression):
    -1623    arg_types = {"this": True, "expressions": True}
    +            
    1623class Lambda(Expression):
    +1624    arg_types = {"this": True, "expressions": True}
     
    @@ -17787,8 +17893,8 @@ Otherwise, this resets the expressions.
    -
    1626class Limit(Expression):
    -1627    arg_types = {"this": False, "expression": True}
    +            
    1627class Limit(Expression):
    +1628    arg_types = {"this": False, "expression": True, "offset": False}
     
    @@ -17852,24 +17958,24 @@ Otherwise, this resets the expressions.
    -
    1630class Literal(Condition):
    -1631    arg_types = {"this": True, "is_string": True}
    -1632
    -1633    @property
    -1634    def hashable_args(self) -> t.Any:
    -1635        return (self.this, self.args.get("is_string"))
    -1636
    -1637    @classmethod
    -1638    def number(cls, number) -> Literal:
    -1639        return cls(this=str(number), is_string=False)
    -1640
    -1641    @classmethod
    -1642    def string(cls, string) -> Literal:
    -1643        return cls(this=str(string), is_string=True)
    -1644
    -1645    @property
    -1646    def output_name(self) -> str:
    -1647        return self.name
    +            
    1631class Literal(Condition):
    +1632    arg_types = {"this": True, "is_string": True}
    +1633
    +1634    @property
    +1635    def hashable_args(self) -> t.Any:
    +1636        return (self.this, self.args.get("is_string"))
    +1637
    +1638    @classmethod
    +1639    def number(cls, number) -> Literal:
    +1640        return cls(this=str(number), is_string=False)
    +1641
    +1642    @classmethod
    +1643    def string(cls, string) -> Literal:
    +1644        return cls(this=str(string), is_string=True)
    +1645
    +1646    @property
    +1647    def output_name(self) -> str:
    +1648        return self.name
     
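Literal stores the raw text plus an is_string flag, and the number/string classmethods are the usual constructors. A brief sketch, assuming the API shown above:

from sqlglot import exp

n = exp.Literal.number(5)
s = exp.Literal.string("a")
print(n.sql(), s.sql())                          # 5 'a'
print(n.args["is_string"], s.args["is_string"])  # False True
print(s.output_name)                             # a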
    @@ -17887,9 +17993,9 @@ Otherwise, this resets the expressions.
    -
    1637    @classmethod
    -1638    def number(cls, number) -> Literal:
    -1639        return cls(this=str(number), is_string=False)
    +            
    1638    @classmethod
    +1639    def number(cls, number) -> Literal:
    +1640        return cls(this=str(number), is_string=False)
     
    @@ -17908,9 +18014,9 @@ Otherwise, this resets the expressions.
    -
    1641    @classmethod
    -1642    def string(cls, string) -> Literal:
    -1643        return cls(this=str(string), is_string=True)
    +            
    1642    @classmethod
    +1643    def string(cls, string) -> Literal:
    +1644        return cls(this=str(string), is_string=True)
     
    @@ -18019,124 +18125,124 @@ Otherwise, this resets the expressions.
    -
    1650class Join(Expression):
    -1651    arg_types = {
    -1652        "this": True,
    -1653        "on": False,
    -1654        "side": False,
    -1655        "kind": False,
    -1656        "using": False,
    -1657        "method": False,
    -1658        "global": False,
    -1659        "hint": False,
    -1660    }
    -1661
    -1662    @property
    -1663    def method(self) -> str:
    -1664        return self.text("method").upper()
    -1665
    -1666    @property
    -1667    def kind(self) -> str:
    -1668        return self.text("kind").upper()
    -1669
    -1670    @property
    -1671    def side(self) -> str:
    -1672        return self.text("side").upper()
    -1673
    -1674    @property
    -1675    def hint(self) -> str:
    -1676        return self.text("hint").upper()
    -1677
    -1678    @property
    -1679    def alias_or_name(self) -> str:
    -1680        return self.this.alias_or_name
    -1681
    -1682    def on(
    -1683        self,
    -1684        *expressions: t.Optional[ExpOrStr],
    -1685        append: bool = True,
    -1686        dialect: DialectType = None,
    -1687        copy: bool = True,
    -1688        **opts,
    -1689    ) -> Join:
    -1690        """
    -1691        Append to or set the ON expressions.
    -1692
    -1693        Example:
    -1694            >>> import sqlglot
    -1695            >>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()
    -1696            'JOIN x ON y = 1'
    -1697
    -1698        Args:
    -1699            *expressions: the SQL code strings to parse.
    -1700                If an `Expression` instance is passed, it will be used as-is.
    -1701                Multiple expressions are combined with an AND operator.
    -1702            append: if `True`, AND the new expressions to any existing expression.
    -1703                Otherwise, this resets the expression.
    -1704            dialect: the dialect used to parse the input expressions.
    -1705            copy: if `False`, modify this expression instance in-place.
    -1706            opts: other options to use to parse the input expressions.
    -1707
    -1708        Returns:
    -1709            The modified Join expression.
    -1710        """
    -1711        join = _apply_conjunction_builder(
    -1712            *expressions,
    -1713            instance=self,
    -1714            arg="on",
    -1715            append=append,
    -1716            dialect=dialect,
    -1717            copy=copy,
    -1718            **opts,
    -1719        )
    -1720
    -1721        if join.kind == "CROSS":
    -1722            join.set("kind", None)
    -1723
    -1724        return join
    -1725
    -1726    def using(
    -1727        self,
    -1728        *expressions: t.Optional[ExpOrStr],
    -1729        append: bool = True,
    -1730        dialect: DialectType = None,
    -1731        copy: bool = True,
    -1732        **opts,
    -1733    ) -> Join:
    -1734        """
    -1735        Append to or set the USING expressions.
    -1736
    -1737        Example:
    -1738            >>> import sqlglot
    -1739            >>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()
    -1740            'JOIN x USING (foo, bla)'
    -1741
    -1742        Args:
    -1743            *expressions: the SQL code strings to parse.
    -1744                If an `Expression` instance is passed, it will be used as-is.
    -1745            append: if `True`, concatenate the new expressions to the existing "using" list.
    -1746                Otherwise, this resets the expression.
    -1747            dialect: the dialect used to parse the input expressions.
    -1748            copy: if `False`, modify this expression instance in-place.
    -1749            opts: other options to use to parse the input expressions.
    -1750
    -1751        Returns:
    -1752            The modified Join expression.
    -1753        """
    -1754        join = _apply_list_builder(
    -1755            *expressions,
    -1756            instance=self,
    -1757            arg="using",
    -1758            append=append,
    -1759            dialect=dialect,
    -1760            copy=copy,
    -1761            **opts,
    -1762        )
    -1763
    -1764        if join.kind == "CROSS":
    -1765            join.set("kind", None)
    -1766
    -1767        return join
    +            
    1651class Join(Expression):
    +1652    arg_types = {
    +1653        "this": True,
    +1654        "on": False,
    +1655        "side": False,
    +1656        "kind": False,
    +1657        "using": False,
    +1658        "method": False,
    +1659        "global": False,
    +1660        "hint": False,
    +1661    }
    +1662
    +1663    @property
    +1664    def method(self) -> str:
    +1665        return self.text("method").upper()
    +1666
    +1667    @property
    +1668    def kind(self) -> str:
    +1669        return self.text("kind").upper()
    +1670
    +1671    @property
    +1672    def side(self) -> str:
    +1673        return self.text("side").upper()
    +1674
    +1675    @property
    +1676    def hint(self) -> str:
    +1677        return self.text("hint").upper()
    +1678
    +1679    @property
    +1680    def alias_or_name(self) -> str:
    +1681        return self.this.alias_or_name
    +1682
    +1683    def on(
    +1684        self,
    +1685        *expressions: t.Optional[ExpOrStr],
    +1686        append: bool = True,
    +1687        dialect: DialectType = None,
    +1688        copy: bool = True,
    +1689        **opts,
    +1690    ) -> Join:
    +1691        """
    +1692        Append to or set the ON expressions.
    +1693
    +1694        Example:
    +1695            >>> import sqlglot
    +1696            >>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()
    +1697            'JOIN x ON y = 1'
    +1698
    +1699        Args:
    +1700            *expressions: the SQL code strings to parse.
    +1701                If an `Expression` instance is passed, it will be used as-is.
    +1702                Multiple expressions are combined with an AND operator.
    +1703            append: if `True`, AND the new expressions to any existing expression.
    +1704                Otherwise, this resets the expression.
    +1705            dialect: the dialect used to parse the input expressions.
    +1706            copy: if `False`, modify this expression instance in-place.
    +1707            opts: other options to use to parse the input expressions.
    +1708
    +1709        Returns:
    +1710            The modified Join expression.
    +1711        """
    +1712        join = _apply_conjunction_builder(
    +1713            *expressions,
    +1714            instance=self,
    +1715            arg="on",
    +1716            append=append,
    +1717            dialect=dialect,
    +1718            copy=copy,
    +1719            **opts,
    +1720        )
    +1721
    +1722        if join.kind == "CROSS":
    +1723            join.set("kind", None)
    +1724
    +1725        return join
    +1726
    +1727    def using(
    +1728        self,
    +1729        *expressions: t.Optional[ExpOrStr],
    +1730        append: bool = True,
    +1731        dialect: DialectType = None,
    +1732        copy: bool = True,
    +1733        **opts,
    +1734    ) -> Join:
    +1735        """
    +1736        Append to or set the USING expressions.
    +1737
    +1738        Example:
    +1739            >>> import sqlglot
    +1740            >>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()
    +1741            'JOIN x USING (foo, bla)'
    +1742
    +1743        Args:
    +1744            *expressions: the SQL code strings to parse.
    +1745                If an `Expression` instance is passed, it will be used as-is.
    +1746            append: if `True`, concatenate the new expressions to the existing "using" list.
    +1747                Otherwise, this resets the expression.
    +1748            dialect: the dialect used to parse the input expressions.
    +1749            copy: if `False`, modify this expression instance in-place.
    +1750            opts: other options to use to parse the input expressions.
    +1751
    +1752        Returns:
    +1753            The modified Join expression.
    +1754        """
    +1755        join = _apply_list_builder(
    +1756            *expressions,
    +1757            instance=self,
    +1758            arg="using",
    +1759            append=append,
    +1760            dialect=dialect,
    +1761            copy=copy,
    +1762            **opts,
    +1763        )
    +1764
    +1765        if join.kind == "CROSS":
    +1766            join.set("kind", None)
    +1767
    +1768        return join
     
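Join.on and Join.using are chainable builders, and both clear an explicit CROSS kind once a condition or column list is attached. A short sketch taken from the docstring examples above:

import sqlglot
from sqlglot import exp

join = sqlglot.parse_one("JOIN x", into=exp.Join)
print(join.on("y = 1").sql())           # JOIN x ON y = 1

join = sqlglot.parse_one("JOIN x", into=exp.Join)
print(join.using("foo", "bla").sql())   # JOIN x USING (foo, bla)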
    @@ -18153,49 +18259,49 @@ Otherwise, this resets the expressions.
    -
    1682    def on(
    -1683        self,
    -1684        *expressions: t.Optional[ExpOrStr],
    -1685        append: bool = True,
    -1686        dialect: DialectType = None,
    -1687        copy: bool = True,
    -1688        **opts,
    -1689    ) -> Join:
    -1690        """
    -1691        Append to or set the ON expressions.
    -1692
    -1693        Example:
    -1694            >>> import sqlglot
    -1695            >>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()
    -1696            'JOIN x ON y = 1'
    -1697
    -1698        Args:
    -1699            *expressions: the SQL code strings to parse.
    -1700                If an `Expression` instance is passed, it will be used as-is.
    -1701                Multiple expressions are combined with an AND operator.
    -1702            append: if `True`, AND the new expressions to any existing expression.
    -1703                Otherwise, this resets the expression.
    -1704            dialect: the dialect used to parse the input expressions.
    -1705            copy: if `False`, modify this expression instance in-place.
    -1706            opts: other options to use to parse the input expressions.
    -1707
    -1708        Returns:
    -1709            The modified Join expression.
    -1710        """
    -1711        join = _apply_conjunction_builder(
    -1712            *expressions,
    -1713            instance=self,
    -1714            arg="on",
    -1715            append=append,
    -1716            dialect=dialect,
    -1717            copy=copy,
    -1718            **opts,
    -1719        )
    -1720
    -1721        if join.kind == "CROSS":
    -1722            join.set("kind", None)
    -1723
    -1724        return join
    +            
    1683    def on(
    +1684        self,
    +1685        *expressions: t.Optional[ExpOrStr],
    +1686        append: bool = True,
    +1687        dialect: DialectType = None,
    +1688        copy: bool = True,
    +1689        **opts,
    +1690    ) -> Join:
    +1691        """
    +1692        Append to or set the ON expressions.
    +1693
    +1694        Example:
    +1695            >>> import sqlglot
    +1696            >>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()
    +1697            'JOIN x ON y = 1'
    +1698
    +1699        Args:
    +1700            *expressions: the SQL code strings to parse.
    +1701                If an `Expression` instance is passed, it will be used as-is.
    +1702                Multiple expressions are combined with an AND operator.
    +1703            append: if `True`, AND the new expressions to any existing expression.
    +1704                Otherwise, this resets the expression.
    +1705            dialect: the dialect used to parse the input expressions.
    +1706            copy: if `False`, modify this expression instance in-place.
    +1707            opts: other options to use to parse the input expressions.
    +1708
    +1709        Returns:
    +1710            The modified Join expression.
    +1711        """
    +1712        join = _apply_conjunction_builder(
    +1713            *expressions,
    +1714            instance=self,
    +1715            arg="on",
    +1716            append=append,
    +1717            dialect=dialect,
    +1718            copy=copy,
    +1719            **opts,
    +1720        )
    +1721
    +1722        if join.kind == "CROSS":
    +1723            join.set("kind", None)
    +1724
    +1725        return join
     
    @@ -18245,48 +18351,48 @@ Otherwise, this resets the expression.
    -
    1726    def using(
    -1727        self,
    -1728        *expressions: t.Optional[ExpOrStr],
    -1729        append: bool = True,
    -1730        dialect: DialectType = None,
    -1731        copy: bool = True,
    -1732        **opts,
    -1733    ) -> Join:
    -1734        """
    -1735        Append to or set the USING expressions.
    -1736
    -1737        Example:
    -1738            >>> import sqlglot
    -1739            >>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()
    -1740            'JOIN x USING (foo, bla)'
    -1741
    -1742        Args:
    -1743            *expressions: the SQL code strings to parse.
    -1744                If an `Expression` instance is passed, it will be used as-is.
    -1745            append: if `True`, concatenate the new expressions to the existing "using" list.
    -1746                Otherwise, this resets the expression.
    -1747            dialect: the dialect used to parse the input expressions.
    -1748            copy: if `False`, modify this expression instance in-place.
    -1749            opts: other options to use to parse the input expressions.
    -1750
    -1751        Returns:
    -1752            The modified Join expression.
    -1753        """
    -1754        join = _apply_list_builder(
    -1755            *expressions,
    -1756            instance=self,
    -1757            arg="using",
    -1758            append=append,
    -1759            dialect=dialect,
    -1760            copy=copy,
    -1761            **opts,
    -1762        )
    -1763
    -1764        if join.kind == "CROSS":
    -1765            join.set("kind", None)
    -1766
    -1767        return join
    +            
    1727    def using(
    +1728        self,
    +1729        *expressions: t.Optional[ExpOrStr],
    +1730        append: bool = True,
    +1731        dialect: DialectType = None,
    +1732        copy: bool = True,
    +1733        **opts,
    +1734    ) -> Join:
    +1735        """
    +1736        Append to or set the USING expressions.
    +1737
    +1738        Example:
    +1739            >>> import sqlglot
    +1740            >>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()
    +1741            'JOIN x USING (foo, bla)'
    +1742
    +1743        Args:
    +1744            *expressions: the SQL code strings to parse.
    +1745                If an `Expression` instance is passed, it will be used as-is.
    +1746            append: if `True`, concatenate the new expressions to the existing "using" list.
    +1747                Otherwise, this resets the expression.
    +1748            dialect: the dialect used to parse the input expressions.
    +1749            copy: if `False`, modify this expression instance in-place.
    +1750            opts: other options to use to parse the input expressions.
    +1751
    +1752        Returns:
    +1753            The modified Join expression.
    +1754        """
    +1755        join = _apply_list_builder(
    +1756            *expressions,
    +1757            instance=self,
    +1758            arg="using",
    +1759            append=append,
    +1760            dialect=dialect,
    +1761            copy=copy,
    +1762            **opts,
    +1763        )
    +1764
    +1765        if join.kind == "CROSS":
    +1766            join.set("kind", None)
    +1767
    +1768        return join
     
    @@ -18382,8 +18488,8 @@ Otherwise, this resets the expression.
    -
    1770class Lateral(UDTF):
    -1771    arg_types = {"this": True, "view": False, "outer": False, "alias": False}
    +            
    1771class Lateral(UDTF):
    +1772    arg_types = {"this": True, "view": False, "outer": False, "alias": False}
     
    @@ -18453,17 +18559,17 @@ Otherwise, this resets the expression.
    -
    1774class MatchRecognize(Expression):
    -1775    arg_types = {
    -1776        "partition_by": False,
    -1777        "order": False,
    -1778        "measures": False,
    -1779        "rows": False,
    -1780        "after": False,
    -1781        "pattern": False,
    -1782        "define": False,
    -1783        "alias": False,
    -1784    }
    +            
    1775class MatchRecognize(Expression):
    +1776    arg_types = {
    +1777        "partition_by": False,
    +1778        "order": False,
    +1779        "measures": False,
    +1780        "rows": False,
    +1781        "after": False,
    +1782        "pattern": False,
    +1783        "define": False,
    +1784        "alias": False,
    +1785    }
     
    @@ -18527,8 +18633,8 @@ Otherwise, this resets the expression.
    -
    1789class Final(Expression):
    -1790    pass
    +            
    1790class Final(Expression):
    +1791    pass
     
    @@ -18592,8 +18698,8 @@ Otherwise, this resets the expression.
    -
    1793class Offset(Expression):
    -1794    arg_types = {"this": False, "expression": True}
    +            
    1794class Offset(Expression):
    +1795    arg_types = {"this": False, "expression": True}
     
    @@ -18657,8 +18763,8 @@ Otherwise, this resets the expression.
    -
    1797class Order(Expression):
    -1798    arg_types = {"this": False, "expressions": True}
    +            
    1798class Order(Expression):
    +1799    arg_types = {"this": False, "expressions": True}
     
    @@ -18722,8 +18828,8 @@ Otherwise, this resets the expression.
    -
    1803class Cluster(Order):
    -1804    pass
    +            
    1804class Cluster(Order):
    +1805    pass
     
    @@ -18787,8 +18893,8 @@ Otherwise, this resets the expression.
    -
    1807class Distribute(Order):
    -1808    pass
    +            
    1808class Distribute(Order):
    +1809    pass
     
    @@ -18852,8 +18958,8 @@ Otherwise, this resets the expression.
    -
    1811class Sort(Order):
    -1812    pass
    +            
    1812class Sort(Order):
    +1813    pass
     
    @@ -18917,8 +19023,8 @@ Otherwise, this resets the expression.
    -
    1815class Ordered(Expression):
    -1816    arg_types = {"this": True, "desc": True, "nulls_first": True}
    +            
    1816class Ordered(Expression):
    +1817    arg_types = {"this": True, "desc": True, "nulls_first": True}
     
    @@ -18982,8 +19088,8 @@ Otherwise, this resets the expression.
    -
    1819class Property(Expression):
    -1820    arg_types = {"this": True, "value": True}
    +            
    1820class Property(Expression):
    +1821    arg_types = {"this": True, "value": True}
     
    @@ -19047,8 +19153,8 @@ Otherwise, this resets the expression.
    -
    1823class AlgorithmProperty(Property):
    -1824    arg_types = {"this": True}
    +            
    1824class AlgorithmProperty(Property):
    +1825    arg_types = {"this": True}
     
    @@ -19112,8 +19218,8 @@ Otherwise, this resets the expression.
    -
    1827class AutoIncrementProperty(Property):
    -1828    arg_types = {"this": True}
    +            
    1828class AutoIncrementProperty(Property):
    +1829    arg_types = {"this": True}
     
    @@ -19177,8 +19283,8 @@ Otherwise, this resets the expression.
    -
    1831class BlockCompressionProperty(Property):
    -1832    arg_types = {"autotemp": False, "always": False, "default": True, "manual": True, "never": True}
    +            
    1832class BlockCompressionProperty(Property):
    +1833    arg_types = {"autotemp": False, "always": False, "default": True, "manual": True, "never": True}
     
    @@ -19242,8 +19348,8 @@ Otherwise, this resets the expression.
    -
    1835class CharacterSetProperty(Property):
    -1836    arg_types = {"this": True, "default": True}
    +            
    1836class CharacterSetProperty(Property):
    +1837    arg_types = {"this": True, "default": True}
     
    @@ -19307,8 +19413,8 @@ Otherwise, this resets the expression.
    -
    1839class ChecksumProperty(Property):
    -1840    arg_types = {"on": False, "default": False}
    +            
    1840class ChecksumProperty(Property):
    +1841    arg_types = {"on": False, "default": False}
     
    @@ -19372,8 +19478,8 @@ Otherwise, this resets the expression.
    -
    1843class CollateProperty(Property):
    -1844    arg_types = {"this": True}
    +            
    1844class CollateProperty(Property):
    +1845    arg_types = {"this": True}
     
    @@ -19437,14 +19543,14 @@ Otherwise, this resets the expression.
    -
    1847class DataBlocksizeProperty(Property):
    -1848    arg_types = {
    -1849        "size": False,
    -1850        "units": False,
    -1851        "minimum": False,
    -1852        "maximum": False,
    -1853        "default": False,
    -1854    }
    +            
    1848class DataBlocksizeProperty(Property):
    +1849    arg_types = {
    +1850        "size": False,
    +1851        "units": False,
    +1852        "minimum": False,
    +1853        "maximum": False,
    +1854        "default": False,
    +1855    }
     
    @@ -19508,8 +19614,8 @@ Otherwise, this resets the expression.
    -
    1857class DefinerProperty(Property):
    -1858    arg_types = {"this": True}
    +            
    1858class DefinerProperty(Property):
    +1859    arg_types = {"this": True}
     
    @@ -19573,8 +19679,8 @@ Otherwise, this resets the expression.
    -
    1861class DistKeyProperty(Property):
    -1862    arg_types = {"this": True}
    +            
    1862class DistKeyProperty(Property):
    +1863    arg_types = {"this": True}
     
    @@ -19638,8 +19744,8 @@ Otherwise, this resets the expression.
    -
    1865class DistStyleProperty(Property):
    -1866    arg_types = {"this": True}
    +            
    1866class DistStyleProperty(Property):
    +1867    arg_types = {"this": True}
     
    @@ -19703,8 +19809,8 @@ Otherwise, this resets the expression.
    -
    1869class EngineProperty(Property):
    -1870    arg_types = {"this": True}
    +            
    1870class EngineProperty(Property):
    +1871    arg_types = {"this": True}
     
    @@ -19753,6 +19859,71 @@ Otherwise, this resets the expression.
+            
1874class ToTableProperty(Property):
+1875    arg_types = {"this": True}
 
@@ -19768,8 +19939,8 @@ Otherwise, this resets the expression.
    -
    1873class ExecuteAsProperty(Property):
    -1874    arg_types = {"this": True}
    +            
    1878class ExecuteAsProperty(Property):
    +1879    arg_types = {"this": True}
     
    @@ -19833,8 +20004,8 @@ Otherwise, this resets the expression.
    -
    1877class ExternalProperty(Property):
    -1878    arg_types = {"this": False}
    +            
    1882class ExternalProperty(Property):
    +1883    arg_types = {"this": False}
     
    @@ -19898,8 +20069,8 @@ Otherwise, this resets the expression.
    -
    1881class FallbackProperty(Property):
    -1882    arg_types = {"no": True, "protection": False}
    +            
    1886class FallbackProperty(Property):
    +1887    arg_types = {"no": True, "protection": False}
     
    @@ -19963,8 +20134,8 @@ Otherwise, this resets the expression.
    -
    1885class FileFormatProperty(Property):
    -1886    arg_types = {"this": True}
    +            
    1890class FileFormatProperty(Property):
    +1891    arg_types = {"this": True}
     
    @@ -20028,8 +20199,8 @@ Otherwise, this resets the expression.
    -
    1889class FreespaceProperty(Property):
    -1890    arg_types = {"this": True, "percent": False}
    +            
    1894class FreespaceProperty(Property):
    +1895    arg_types = {"this": True, "percent": False}
     
    @@ -20093,8 +20264,8 @@ Otherwise, this resets the expression.
    -
    1893class InputOutputFormat(Expression):
    -1894    arg_types = {"input_format": False, "output_format": False}
    +            
    1898class InputOutputFormat(Expression):
    +1899    arg_types = {"input_format": False, "output_format": False}
     
    @@ -20158,14 +20329,14 @@ Otherwise, this resets the expression.
    -
    1897class IsolatedLoadingProperty(Property):
    -1898    arg_types = {
    -1899        "no": True,
    -1900        "concurrent": True,
    -1901        "for_all": True,
    -1902        "for_insert": True,
    -1903        "for_none": True,
    -1904    }
    +            
    1902class IsolatedLoadingProperty(Property):
    +1903    arg_types = {
    +1904        "no": True,
    +1905        "concurrent": True,
    +1906        "for_all": True,
    +1907        "for_insert": True,
    +1908        "for_none": True,
    +1909    }
     
    @@ -20229,14 +20400,14 @@ Otherwise, this resets the expression.
    -
    1907class JournalProperty(Property):
    -1908    arg_types = {
    -1909        "no": False,
    -1910        "dual": False,
    -1911        "before": False,
    -1912        "local": False,
    -1913        "after": False,
    -1914    }
    +            
    1912class JournalProperty(Property):
    +1913    arg_types = {
    +1914        "no": False,
    +1915        "dual": False,
    +1916        "before": False,
    +1917        "local": False,
    +1918        "after": False,
    +1919    }
     
    @@ -20300,8 +20471,8 @@ Otherwise, this resets the expression.
    -
    1917class LanguageProperty(Property):
    -1918    arg_types = {"this": True}
    +            
    1922class LanguageProperty(Property):
    +1923    arg_types = {"this": True}
     
    @@ -20365,8 +20536,8 @@ Otherwise, this resets the expression.
    -
    1921class DictProperty(Property):
    -1922    arg_types = {"this": True, "kind": True, "settings": False}
    +            
    1926class DictProperty(Property):
    +1927    arg_types = {"this": True, "kind": True, "settings": False}
     
    @@ -20430,8 +20601,8 @@ Otherwise, this resets the expression.
    -
    1925class DictSubProperty(Property):
    -1926    pass
    +            
    1930class DictSubProperty(Property):
    +1931    pass
     
    @@ -20495,8 +20666,8 @@ Otherwise, this resets the expression.
    -
    1929class DictRange(Property):
    -1930    arg_types = {"this": True, "min": True, "max": True}
    +            
    1934class DictRange(Property):
    +1935    arg_types = {"this": True, "min": True, "max": True}
     
    @@ -20545,6 +20716,71 @@ Otherwise, this resets the expression.
+            
1940class OnCluster(Property):
+1941    arg_types = {"this": True}
 
@@ -20560,8 +20796,8 @@ Otherwise, this resets the expression.
-
    1933class LikeProperty(Property):
    -1934    arg_types = {"this": True, "expressions": False}
    +            
    1944class LikeProperty(Property):
    +1945    arg_types = {"this": True, "expressions": False}
     
    @@ -20625,8 +20861,8 @@ Otherwise, this resets the expression.
    -
    1937class LocationProperty(Property):
    -1938    arg_types = {"this": True}
    +            
    1948class LocationProperty(Property):
    +1949    arg_types = {"this": True}
     
    @@ -20690,14 +20926,14 @@ Otherwise, this resets the expression.
    -
    1941class LockingProperty(Property):
    -1942    arg_types = {
    -1943        "this": False,
    -1944        "kind": True,
    -1945        "for_or_in": True,
    -1946        "lock_type": True,
    -1947        "override": False,
    -1948    }
    +            
    1952class LockingProperty(Property):
    +1953    arg_types = {
    +1954        "this": False,
    +1955        "kind": True,
    +1956        "for_or_in": True,
    +1957        "lock_type": True,
    +1958        "override": False,
    +1959    }
     
    @@ -20761,8 +20997,8 @@ Otherwise, this resets the expression.
    -
    1951class LogProperty(Property):
    -1952    arg_types = {"no": True}
    +            
    1962class LogProperty(Property):
    +1963    arg_types = {"no": True}
     
    @@ -20826,8 +21062,8 @@ Otherwise, this resets the expression.
    -
    1955class MaterializedProperty(Property):
    -1956    arg_types = {"this": False}
    +            
    1966class MaterializedProperty(Property):
    +1967    arg_types = {"this": False}
     
    @@ -20891,8 +21127,8 @@ Otherwise, this resets the expression.
    -
    1959class MergeBlockRatioProperty(Property):
    -1960    arg_types = {"this": False, "no": False, "default": False, "percent": False}
    +            
    1970class MergeBlockRatioProperty(Property):
    +1971    arg_types = {"this": False, "no": False, "default": False, "percent": False}
     
    @@ -20956,8 +21192,8 @@ Otherwise, this resets the expression.
    -
    1963class NoPrimaryIndexProperty(Property):
    -1964    arg_types = {}
    +            
    1974class NoPrimaryIndexProperty(Property):
    +1975    arg_types = {}
     
    @@ -21021,8 +21257,8 @@ Otherwise, this resets the expression.
    -
    1967class OnCommitProperty(Property):
    -1968    arg_type = {"delete": False}
    +            
    1978class OnCommitProperty(Property):
    +1979    arg_type = {"delete": False}
     
    @@ -21086,8 +21322,8 @@ Otherwise, this resets the expression.
    -
    1971class PartitionedByProperty(Property):
    -1972    arg_types = {"this": True}
    +            
    1982class PartitionedByProperty(Property):
    +1983    arg_types = {"this": True}
     
    @@ -21151,8 +21387,8 @@ Otherwise, this resets the expression.
    -
    1975class ReturnsProperty(Property):
    -1976    arg_types = {"this": True, "is_table": False, "table": False}
    +            
    1986class ReturnsProperty(Property):
    +1987    arg_types = {"this": True, "is_table": False, "table": False}
     
    @@ -21216,8 +21452,8 @@ Otherwise, this resets the expression.
    -
    1979class RowFormatProperty(Property):
    -1980    arg_types = {"this": True}
    +            
    1990class RowFormatProperty(Property):
    +1991    arg_types = {"this": True}
     
    @@ -21281,17 +21517,17 @@ Otherwise, this resets the expression.
    -
    1983class RowFormatDelimitedProperty(Property):
    -1984    # https://cwiki.apache.org/confluence/display/hive/languagemanual+dml
    -1985    arg_types = {
    -1986        "fields": False,
    -1987        "escaped": False,
    -1988        "collection_items": False,
    -1989        "map_keys": False,
    -1990        "lines": False,
    -1991        "null": False,
    -1992        "serde": False,
    -1993    }
    +            
    1994class RowFormatDelimitedProperty(Property):
    +1995    # https://cwiki.apache.org/confluence/display/hive/languagemanual+dml
    +1996    arg_types = {
    +1997        "fields": False,
    +1998        "escaped": False,
    +1999        "collection_items": False,
    +2000        "map_keys": False,
    +2001        "lines": False,
    +2002        "null": False,
    +2003        "serde": False,
    +2004    }
     
    @@ -21355,8 +21591,8 @@ Otherwise, this resets the expression.
    -
    1996class RowFormatSerdeProperty(Property):
    -1997    arg_types = {"this": True}
    +            
    2007class RowFormatSerdeProperty(Property):
    +2008    arg_types = {"this": True}
     
    @@ -21420,8 +21656,8 @@ Otherwise, this resets the expression.
    -
    2000class SchemaCommentProperty(Property):
    -2001    arg_types = {"this": True}
    +            
    2011class SchemaCommentProperty(Property):
    +2012    arg_types = {"this": True}
     
    @@ -21485,8 +21721,8 @@ Otherwise, this resets the expression.
    -
    2004class SerdeProperties(Property):
    -2005    arg_types = {"expressions": True}
    +            
    2015class SerdeProperties(Property):
    +2016    arg_types = {"expressions": True}
     
    @@ -21550,8 +21786,8 @@ Otherwise, this resets the expression.
    -
    2008class SetProperty(Property):
    -2009    arg_types = {"multi": True}
    +            
    2019class SetProperty(Property):
    +2020    arg_types = {"multi": True}
     
    @@ -21615,8 +21851,8 @@ Otherwise, this resets the expression.
    -
    2012class SettingsProperty(Property):
    -2013    arg_types = {"expressions": True}
    +            
    2023class SettingsProperty(Property):
    +2024    arg_types = {"expressions": True}
     
    @@ -21680,8 +21916,8 @@ Otherwise, this resets the expression.
    -
    2016class SortKeyProperty(Property):
    -2017    arg_types = {"this": True, "compound": False}
    +            
    2027class SortKeyProperty(Property):
    +2028    arg_types = {"this": True, "compound": False}
     
    @@ -21745,8 +21981,8 @@ Otherwise, this resets the expression.
    -
    2020class SqlSecurityProperty(Property):
    -2021    arg_types = {"definer": True}
    +            
    2031class SqlSecurityProperty(Property):
    +2032    arg_types = {"definer": True}
     
    @@ -21810,8 +22046,8 @@ Otherwise, this resets the expression.
    -
    2024class StabilityProperty(Property):
    -2025    arg_types = {"this": True}
    +            
    2035class StabilityProperty(Property):
    +2036    arg_types = {"this": True}
     
    @@ -21875,8 +22111,8 @@ Otherwise, this resets the expression.
    -
    2028class TemporaryProperty(Property):
    -2029    arg_types = {}
    +            
    2039class TemporaryProperty(Property):
    +2040    arg_types = {}
     
    @@ -21940,8 +22176,8 @@ Otherwise, this resets the expression.
    -
    2032class TransientProperty(Property):
    -2033    arg_types = {"this": False}
    +            
    2043class TransientProperty(Property):
    +2044    arg_types = {"this": False}
     
    @@ -22005,8 +22241,8 @@ Otherwise, this resets the expression.
    -
    2036class VolatileProperty(Property):
    -2037    arg_types = {"this": False}
    +            
    2047class VolatileProperty(Property):
    +2048    arg_types = {"this": False}
     
    @@ -22070,8 +22306,8 @@ Otherwise, this resets the expression.
    -
    2040class WithDataProperty(Property):
    -2041    arg_types = {"no": True, "statistics": False}
    +            
    2051class WithDataProperty(Property):
    +2052    arg_types = {"no": True, "statistics": False}
     
    @@ -22135,8 +22371,8 @@ Otherwise, this resets the expression.
    -
    2044class WithJournalTableProperty(Property):
    -2045    arg_types = {"this": True}
    +            
    2055class WithJournalTableProperty(Property):
    +2056    arg_types = {"this": True}
     
    @@ -22200,65 +22436,65 @@ Otherwise, this resets the expression.
    -
    2048class Properties(Expression):
    -2049    arg_types = {"expressions": True}
    -2050
    -2051    NAME_TO_PROPERTY = {
    -2052        "ALGORITHM": AlgorithmProperty,
    -2053        "AUTO_INCREMENT": AutoIncrementProperty,
    -2054        "CHARACTER SET": CharacterSetProperty,
    -2055        "COLLATE": CollateProperty,
    -2056        "COMMENT": SchemaCommentProperty,
    -2057        "DEFINER": DefinerProperty,
    -2058        "DISTKEY": DistKeyProperty,
    -2059        "DISTSTYLE": DistStyleProperty,
    -2060        "ENGINE": EngineProperty,
    -2061        "EXECUTE AS": ExecuteAsProperty,
    -2062        "FORMAT": FileFormatProperty,
    -2063        "LANGUAGE": LanguageProperty,
    -2064        "LOCATION": LocationProperty,
    -2065        "PARTITIONED_BY": PartitionedByProperty,
    -2066        "RETURNS": ReturnsProperty,
    -2067        "ROW_FORMAT": RowFormatProperty,
    -2068        "SORTKEY": SortKeyProperty,
    -2069    }
    -2070
    -2071    PROPERTY_TO_NAME = {v: k for k, v in NAME_TO_PROPERTY.items()}
    -2072
    -2073    # CREATE property locations
    -2074    # Form: schema specified
    -2075    #   create [POST_CREATE]
    -2076    #     table a [POST_NAME]
    -2077    #     (b int) [POST_SCHEMA]
    -2078    #     with ([POST_WITH])
    -2079    #     index (b) [POST_INDEX]
    -2080    #
    -2081    # Form: alias selection
    -2082    #   create [POST_CREATE]
    -2083    #     table a [POST_NAME]
    -2084    #     as [POST_ALIAS] (select * from b) [POST_EXPRESSION]
    -2085    #     index (c) [POST_INDEX]
    -2086    class Location(AutoName):
    -2087        POST_CREATE = auto()
    -2088        POST_NAME = auto()
    -2089        POST_SCHEMA = auto()
    -2090        POST_WITH = auto()
    -2091        POST_ALIAS = auto()
    -2092        POST_EXPRESSION = auto()
    -2093        POST_INDEX = auto()
    -2094        UNSUPPORTED = auto()
    -2095
    -2096    @classmethod
    -2097    def from_dict(cls, properties_dict: t.Dict) -> Properties:
    -2098        expressions = []
    -2099        for key, value in properties_dict.items():
    -2100            property_cls = cls.NAME_TO_PROPERTY.get(key.upper())
    -2101            if property_cls:
    -2102                expressions.append(property_cls(this=convert(value)))
    -2103            else:
    -2104                expressions.append(Property(this=Literal.string(key), value=convert(value)))
    -2105
    -2106        return cls(expressions=expressions)
    +            
    2059class Properties(Expression):
    +2060    arg_types = {"expressions": True}
    +2061
    +2062    NAME_TO_PROPERTY = {
    +2063        "ALGORITHM": AlgorithmProperty,
    +2064        "AUTO_INCREMENT": AutoIncrementProperty,
    +2065        "CHARACTER SET": CharacterSetProperty,
    +2066        "COLLATE": CollateProperty,
    +2067        "COMMENT": SchemaCommentProperty,
    +2068        "DEFINER": DefinerProperty,
    +2069        "DISTKEY": DistKeyProperty,
    +2070        "DISTSTYLE": DistStyleProperty,
    +2071        "ENGINE": EngineProperty,
    +2072        "EXECUTE AS": ExecuteAsProperty,
    +2073        "FORMAT": FileFormatProperty,
    +2074        "LANGUAGE": LanguageProperty,
    +2075        "LOCATION": LocationProperty,
    +2076        "PARTITIONED_BY": PartitionedByProperty,
    +2077        "RETURNS": ReturnsProperty,
    +2078        "ROW_FORMAT": RowFormatProperty,
    +2079        "SORTKEY": SortKeyProperty,
    +2080    }
    +2081
    +2082    PROPERTY_TO_NAME = {v: k for k, v in NAME_TO_PROPERTY.items()}
    +2083
    +2084    # CREATE property locations
    +2085    # Form: schema specified
    +2086    #   create [POST_CREATE]
    +2087    #     table a [POST_NAME]
    +2088    #     (b int) [POST_SCHEMA]
    +2089    #     with ([POST_WITH])
    +2090    #     index (b) [POST_INDEX]
    +2091    #
    +2092    # Form: alias selection
    +2093    #   create [POST_CREATE]
    +2094    #     table a [POST_NAME]
    +2095    #     as [POST_ALIAS] (select * from b) [POST_EXPRESSION]
    +2096    #     index (c) [POST_INDEX]
    +2097    class Location(AutoName):
    +2098        POST_CREATE = auto()
    +2099        POST_NAME = auto()
    +2100        POST_SCHEMA = auto()
    +2101        POST_WITH = auto()
    +2102        POST_ALIAS = auto()
    +2103        POST_EXPRESSION = auto()
    +2104        POST_INDEX = auto()
    +2105        UNSUPPORTED = auto()
    +2106
    +2107    @classmethod
    +2108    def from_dict(cls, properties_dict: t.Dict) -> Properties:
    +2109        expressions = []
    +2110        for key, value in properties_dict.items():
    +2111            property_cls = cls.NAME_TO_PROPERTY.get(key.upper())
    +2112            if property_cls:
    +2113                expressions.append(property_cls(this=convert(value)))
    +2114            else:
    +2115                expressions.append(Property(this=Literal.string(key), value=convert(value)))
    +2116
    +2117        return cls(expressions=expressions)
     
    @@ -22276,17 +22512,17 @@ Otherwise, this resets the expression.
    -
    2096    @classmethod
    -2097    def from_dict(cls, properties_dict: t.Dict) -> Properties:
    -2098        expressions = []
    -2099        for key, value in properties_dict.items():
    -2100            property_cls = cls.NAME_TO_PROPERTY.get(key.upper())
    -2101            if property_cls:
    -2102                expressions.append(property_cls(this=convert(value)))
    -2103            else:
    -2104                expressions.append(Property(this=Literal.string(key), value=convert(value)))
    -2105
    -2106        return cls(expressions=expressions)
    +            
    2107    @classmethod
    +2108    def from_dict(cls, properties_dict: t.Dict) -> Properties:
    +2109        expressions = []
    +2110        for key, value in properties_dict.items():
    +2111            property_cls = cls.NAME_TO_PROPERTY.get(key.upper())
    +2112            if property_cls:
    +2113                expressions.append(property_cls(this=convert(value)))
    +2114            else:
    +2115                expressions.append(Property(this=Literal.string(key), value=convert(value)))
    +2116
    +2117        return cls(expressions=expressions)
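A short, hedged sketch of how `from_dict` resolves keys, following the implementation above: keys found in `NAME_TO_PROPERTY` become their dedicated property classes, while anything else falls back to a generic `Property` with a string key and a converted value:

    from sqlglot import exp

    props = exp.Properties.from_dict({"FORMAT": "parquet", "custom_key": 1})

    # "FORMAT" maps to FileFormatProperty via NAME_TO_PROPERTY;
    # "custom_key" is unknown, so it becomes Property(this=..., value=...).
    for prop in props.expressions:
        print(type(prop).__name__, prop.sql())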
     
    @@ -22351,15 +22587,15 @@ Otherwise, this resets the expression.
    -
    2086    class Location(AutoName):
    -2087        POST_CREATE = auto()
    -2088        POST_NAME = auto()
    -2089        POST_SCHEMA = auto()
    -2090        POST_WITH = auto()
    -2091        POST_ALIAS = auto()
    -2092        POST_EXPRESSION = auto()
    -2093        POST_INDEX = auto()
    -2094        UNSUPPORTED = auto()
    +            
    2097    class Location(AutoName):
    +2098        POST_CREATE = auto()
    +2099        POST_NAME = auto()
    +2100        POST_SCHEMA = auto()
    +2101        POST_WITH = auto()
    +2102        POST_ALIAS = auto()
    +2103        POST_EXPRESSION = auto()
    +2104        POST_INDEX = auto()
    +2105        UNSUPPORTED = auto()
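As a brief illustration of the two lookup structures defined on `Properties`: `PROPERTY_TO_NAME` is the inverse of `NAME_TO_PROPERTY`, and `Location` is an `AutoName` enum whose members mirror the CREATE-statement positions listed in the comments above:

    from sqlglot import exp

    # Property class -> canonical keyword (inverse mapping).
    print(exp.Properties.PROPERTY_TO_NAME[exp.EngineProperty])          # ENGINE
    print(exp.Properties.PROPERTY_TO_NAME[exp.SchemaCommentProperty])   # COMMENT

    # Positions a property can occupy inside a CREATE statement.
    print([loc.name for loc in exp.Properties.Location])
    # ['POST_CREATE', 'POST_NAME', 'POST_SCHEMA', 'POST_WITH',
    #  'POST_ALIAS', 'POST_EXPRESSION', 'POST_INDEX', 'UNSUPPORTED']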
     
    @@ -22485,8 +22721,8 @@ Otherwise, this resets the expression.
    -
    2109class Qualify(Expression):
    -2110    pass
    +            
    2120class Qualify(Expression):
    +2121    pass
     
    @@ -22550,8 +22786,8 @@ Otherwise, this resets the expression.
    -
    2114class Return(Expression):
    -2115    pass
    +            
    2125class Return(Expression):
    +2126    pass
     
    @@ -22615,8 +22851,8 @@ Otherwise, this resets the expression.
    -
    2118class Reference(Expression):
    -2119    arg_types = {"this": True, "expressions": False, "options": False}
    +            
    2129class Reference(Expression):
    +2130    arg_types = {"this": True, "expressions": False, "options": False}
     
    @@ -22680,17 +22916,17 @@ Otherwise, this resets the expression.
    -
    2122class Tuple(Expression):
    -2123    arg_types = {"expressions": False}
    -2124
    -2125    def isin(
    -2126        self, *expressions: t.Any, query: t.Optional[ExpOrStr] = None, copy: bool = True, **opts
    -2127    ) -> In:
    -2128        return In(
    -2129            this=_maybe_copy(self, copy),
    -2130            expressions=[convert(e, copy=copy) for e in expressions],
    -2131            query=maybe_parse(query, copy=copy, **opts) if query else None,
    -2132        )
    +            
    2133class Tuple(Expression):
    +2134    arg_types = {"expressions": False}
    +2135
    +2136    def isin(
    +2137        self, *expressions: t.Any, query: t.Optional[ExpOrStr] = None, copy: bool = True, **opts
    +2138    ) -> In:
    +2139        return In(
    +2140            this=_maybe_copy(self, copy),
    +2141            expressions=[convert(e, copy=copy) for e in expressions],
    +2142            query=maybe_parse(query, copy=copy, **opts) if query else None,
    +2143        )
     
    @@ -22707,14 +22943,14 @@ Otherwise, this resets the expression.
    -
    2125    def isin(
    -2126        self, *expressions: t.Any, query: t.Optional[ExpOrStr] = None, copy: bool = True, **opts
    -2127    ) -> In:
    -2128        return In(
    -2129            this=_maybe_copy(self, copy),
    -2130            expressions=[convert(e, copy=copy) for e in expressions],
    -2131            query=maybe_parse(query, copy=copy, **opts) if query else None,
    -2132        )
    +            
    2136    def isin(
    +2137        self, *expressions: t.Any, query: t.Optional[ExpOrStr] = None, copy: bool = True, **opts
    +2138    ) -> In:
    +2139        return In(
    +2140            this=_maybe_copy(self, copy),
    +2141            expressions=[convert(e, copy=copy) for e in expressions],
    +2142            query=maybe_parse(query, copy=copy, **opts) if query else None,
    +2143        )
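A hedged sketch of the `isin` helper above: it returns an `In` node whose left side is the tuple, with positional values run through `convert` and an optional `query` string parsed into a subquery (exact SQL rendering may vary by dialect):

    from sqlglot import exp

    pair = exp.Tuple(expressions=[exp.column("a"), exp.column("b")])

    # Positional expressions end up in In.expressions.
    values = exp.Tuple(expressions=[exp.Literal.number(1), exp.Literal.number(2)])
    print(pair.isin(values).sql())  # e.g. (a, b) IN ((1, 2))

    # A query string is parsed via maybe_parse and stored on the In node.
    print(pair.isin(query="SELECT a, b FROM t").sql())  # e.g. (a, b) IN (SELECT a, b FROM t)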
     
    @@ -22779,84 +23015,84 @@ Otherwise, this resets the expression.
    -
    2135class Subqueryable(Unionable):
    -2136    def subquery(self, alias: t.Optional[ExpOrStr] = None, copy: bool = True) -> Subquery:
    -2137        """
    -2138        Convert this expression to an aliased expression that can be used as a Subquery.
    -2139
    -2140        Example:
    -2141            >>> subquery = Select().select("x").from_("tbl").subquery()
    -2142            >>> Select().select("x").from_(subquery).sql()
    -2143            'SELECT x FROM (SELECT x FROM tbl)'
    -2144
    -2145        Args:
    -2146            alias (str | Identifier): an optional alias for the subquery
    -2147            copy (bool): if `False`, modify this expression instance in-place.
    -2148
    -2149        Returns:
    -2150            Alias: the subquery
    -2151        """
    -2152        instance = _maybe_copy(self, copy)
    -2153        if not isinstance(alias, Expression):
    -2154            alias = TableAlias(this=to_identifier(alias)) if alias else None
    +            
    2146class Subqueryable(Unionable):
    +2147    def subquery(self, alias: t.Optional[ExpOrStr] = None, copy: bool = True) -> Subquery:
    +2148        """
    +2149        Convert this expression to an aliased expression that can be used as a Subquery.
    +2150
    +2151        Example:
    +2152            >>> subquery = Select().select("x").from_("tbl").subquery()
    +2153            >>> Select().select("x").from_(subquery).sql()
    +2154            'SELECT x FROM (SELECT x FROM tbl)'
     2155
    -2156        return Subquery(this=instance, alias=alias)
    -2157
    -2158    def limit(
    -2159        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    -2160    ) -> Select:
    -2161        raise NotImplementedError
    -2162
    -2163    @property
    -2164    def ctes(self):
    -2165        with_ = self.args.get("with")
    -2166        if not with_:
    -2167            return []
    -2168        return with_.expressions
    -2169
    -2170    @property
    -2171    def selects(self):
    -2172        raise NotImplementedError("Subqueryable objects must implement `selects`")
    +2156        Args:
    +2157            alias (str | Identifier): an optional alias for the subquery
    +2158            copy (bool): if `False`, modify this expression instance in-place.
    +2159
    +2160        Returns:
    +2161            Alias: the subquery
    +2162        """
    +2163        instance = _maybe_copy(self, copy)
    +2164        if not isinstance(alias, Expression):
    +2165            alias = TableAlias(this=to_identifier(alias)) if alias else None
    +2166
    +2167        return Subquery(this=instance, alias=alias)
    +2168
    +2169    def limit(
    +2170        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    +2171    ) -> Select:
    +2172        raise NotImplementedError
     2173
     2174    @property
    -2175    def named_selects(self):
    -2176        raise NotImplementedError("Subqueryable objects must implement `named_selects`")
    -2177
    -2178    def with_(
    -2179        self,
    -2180        alias: ExpOrStr,
    -2181        as_: ExpOrStr,
    -2182        recursive: t.Optional[bool] = None,
    -2183        append: bool = True,
    -2184        dialect: DialectType = None,
    -2185        copy: bool = True,
    -2186        **opts,
    -2187    ) -> Subqueryable:
    -2188        """
    -2189        Append to or set the common table expressions.
    -2190
    -2191        Example:
    -2192            >>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()
    -2193            'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'
    -2194
    -2195        Args:
    -2196            alias: the SQL code string to parse as the table name.
    -2197                If an `Expression` instance is passed, this is used as-is.
    -2198            as_: the SQL code string to parse as the table expression.
    -2199                If an `Expression` instance is passed, it will be used as-is.
    -2200            recursive: set the RECURSIVE part of the expression. Defaults to `False`.
    -2201            append: if `True`, add to any existing expressions.
    -2202                Otherwise, this resets the expressions.
    -2203            dialect: the dialect used to parse the input expression.
    -2204            copy: if `False`, modify this expression instance in-place.
    -2205            opts: other options to use to parse the input expressions.
    -2206
    -2207        Returns:
    -2208            The modified expression.
    -2209        """
    -2210        return _apply_cte_builder(
    -2211            self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts
    -2212        )
    +2175    def ctes(self):
    +2176        with_ = self.args.get("with")
    +2177        if not with_:
    +2178            return []
    +2179        return with_.expressions
    +2180
    +2181    @property
    +2182    def selects(self):
    +2183        raise NotImplementedError("Subqueryable objects must implement `selects`")
    +2184
    +2185    @property
    +2186    def named_selects(self):
    +2187        raise NotImplementedError("Subqueryable objects must implement `named_selects`")
    +2188
    +2189    def with_(
    +2190        self,
    +2191        alias: ExpOrStr,
    +2192        as_: ExpOrStr,
    +2193        recursive: t.Optional[bool] = None,
    +2194        append: bool = True,
    +2195        dialect: DialectType = None,
    +2196        copy: bool = True,
    +2197        **opts,
    +2198    ) -> Subqueryable:
    +2199        """
    +2200        Append to or set the common table expressions.
    +2201
    +2202        Example:
    +2203            >>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()
    +2204            'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'
    +2205
    +2206        Args:
    +2207            alias: the SQL code string to parse as the table name.
    +2208                If an `Expression` instance is passed, this is used as-is.
    +2209            as_: the SQL code string to parse as the table expression.
    +2210                If an `Expression` instance is passed, it will be used as-is.
    +2211            recursive: set the RECURSIVE part of the expression. Defaults to `False`.
    +2212            append: if `True`, add to any existing expressions.
    +2213                Otherwise, this resets the expressions.
    +2214            dialect: the dialect used to parse the input expression.
    +2215            copy: if `False`, modify this expression instance in-place.
    +2216            opts: other options to use to parse the input expressions.
    +2217
    +2218        Returns:
    +2219            The modified expression.
    +2220        """
    +2221        return _apply_cte_builder(
    +2222            self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts
    +2223        )
     
    @@ -22873,27 +23109,27 @@ Otherwise, this resets the expression.
    -
    2136    def subquery(self, alias: t.Optional[ExpOrStr] = None, copy: bool = True) -> Subquery:
    -2137        """
    -2138        Convert this expression to an aliased expression that can be used as a Subquery.
    -2139
    -2140        Example:
    -2141            >>> subquery = Select().select("x").from_("tbl").subquery()
    -2142            >>> Select().select("x").from_(subquery).sql()
    -2143            'SELECT x FROM (SELECT x FROM tbl)'
    -2144
    -2145        Args:
    -2146            alias (str | Identifier): an optional alias for the subquery
    -2147            copy (bool): if `False`, modify this expression instance in-place.
    -2148
    -2149        Returns:
    -2150            Alias: the subquery
    -2151        """
    -2152        instance = _maybe_copy(self, copy)
    -2153        if not isinstance(alias, Expression):
    -2154            alias = TableAlias(this=to_identifier(alias)) if alias else None
    +            
    2147    def subquery(self, alias: t.Optional[ExpOrStr] = None, copy: bool = True) -> Subquery:
    +2148        """
    +2149        Convert this expression to an aliased expression that can be used as a Subquery.
    +2150
    +2151        Example:
    +2152            >>> subquery = Select().select("x").from_("tbl").subquery()
    +2153            >>> Select().select("x").from_(subquery).sql()
    +2154            'SELECT x FROM (SELECT x FROM tbl)'
     2155
    -2156        return Subquery(this=instance, alias=alias)
    +2156        Args:
    +2157            alias (str | Identifier): an optional alias for the subquery
    +2158            copy (bool): if `False`, modify this expression instance in-place.
    +2159
    +2160        Returns:
    +2161            Alias: the subquery
    +2162        """
    +2163        instance = _maybe_copy(self, copy)
    +2164        if not isinstance(alias, Expression):
    +2165            alias = TableAlias(this=to_identifier(alias)) if alias else None
    +2166
    +2167        return Subquery(this=instance, alias=alias)
     
    @@ -22937,10 +23173,10 @@ Otherwise, this resets the expression.
    -
    2158    def limit(
    -2159        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    -2160    ) -> Select:
    -2161        raise NotImplementedError
    +            
    2169    def limit(
    +2170        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    +2171    ) -> Select:
    +2172        raise NotImplementedError
     
    @@ -22958,41 +23194,41 @@ Otherwise, this resets the expression.
    -
    2178    def with_(
    -2179        self,
    -2180        alias: ExpOrStr,
    -2181        as_: ExpOrStr,
    -2182        recursive: t.Optional[bool] = None,
    -2183        append: bool = True,
    -2184        dialect: DialectType = None,
    -2185        copy: bool = True,
    -2186        **opts,
    -2187    ) -> Subqueryable:
    -2188        """
    -2189        Append to or set the common table expressions.
    -2190
    -2191        Example:
    -2192            >>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()
    -2193            'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'
    -2194
    -2195        Args:
    -2196            alias: the SQL code string to parse as the table name.
    -2197                If an `Expression` instance is passed, this is used as-is.
    -2198            as_: the SQL code string to parse as the table expression.
    -2199                If an `Expression` instance is passed, it will be used as-is.
    -2200            recursive: set the RECURSIVE part of the expression. Defaults to `False`.
    -2201            append: if `True`, add to any existing expressions.
    -2202                Otherwise, this resets the expressions.
    -2203            dialect: the dialect used to parse the input expression.
    -2204            copy: if `False`, modify this expression instance in-place.
    -2205            opts: other options to use to parse the input expressions.
    -2206
    -2207        Returns:
    -2208            The modified expression.
    -2209        """
    -2210        return _apply_cte_builder(
    -2211            self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts
    -2212        )
    +            
    2189    def with_(
    +2190        self,
    +2191        alias: ExpOrStr,
    +2192        as_: ExpOrStr,
    +2193        recursive: t.Optional[bool] = None,
    +2194        append: bool = True,
    +2195        dialect: DialectType = None,
    +2196        copy: bool = True,
    +2197        **opts,
    +2198    ) -> Subqueryable:
    +2199        """
    +2200        Append to or set the common table expressions.
    +2201
    +2202        Example:
    +2203            >>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()
    +2204            'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'
    +2205
    +2206        Args:
    +2207            alias: the SQL code string to parse as the table name.
    +2208                If an `Expression` instance is passed, this is used as-is.
    +2209            as_: the SQL code string to parse as the table expression.
    +2210                If an `Expression` instance is passed, it will be used as-is.
    +2211            recursive: set the RECURSIVE part of the expression. Defaults to `False`.
    +2212            append: if `True`, add to any existing expressions.
    +2213                Otherwise, this resets the expressions.
    +2214            dialect: the dialect used to parse the input expression.
    +2215            copy: if `False`, modify this expression instance in-place.
    +2216            opts: other options to use to parse the input expressions.
    +2217
    +2218        Returns:
    +2219            The modified expression.
    +2220        """
    +2221        return _apply_cte_builder(
    +2222            self, alias, as_, recursive=recursive, append=append, dialect=dialect, copy=copy, **opts
    +2223        )
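Combining the two builders documented on `Subqueryable`, a small sketch (mirroring the doctests above) that attaches a CTE with `with_` and wraps a query with `subquery`:

    from sqlglot import select

    # with_: prepend a common table expression.
    cte = select("x").from_("tbl2").with_("tbl2", as_="SELECT x FROM tbl")
    print(cte.sql())  # WITH tbl2 AS (SELECT x FROM tbl) SELECT x FROM tbl2

    # subquery: alias the query so it can be selected from.
    sub = select("x").from_("tbl").subquery(alias="t")
    print(select("x").from_(sub).sql())  # SELECT x FROM (SELECT x FROM tbl) AS t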
     
    @@ -23096,35 +23332,35 @@ Otherwise, this resets the expressions.
    -
    2238class Table(Expression):
    -2239    arg_types = {
    -2240        "this": True,
    -2241        "alias": False,
    -2242        "db": False,
    -2243        "catalog": False,
    -2244        "laterals": False,
    -2245        "joins": False,
    -2246        "pivots": False,
    -2247        "hints": False,
    -2248        "system_time": False,
    -2249    }
    -2250
    -2251    @property
    -2252    def db(self) -> str:
    -2253        return self.text("db")
    -2254
    -2255    @property
    -2256    def catalog(self) -> str:
    -2257        return self.text("catalog")
    -2258
    -2259    @property
    -2260    def parts(self) -> t.List[Identifier]:
    -2261        """Return the parts of a table in order catalog, db, table."""
    -2262        return [
    -2263            t.cast(Identifier, self.args[part])
    -2264            for part in ("catalog", "db", "this")
    -2265            if self.args.get(part)
    -2266        ]
    +            
    2249class Table(Expression):
    +2250    arg_types = {
    +2251        "this": True,
    +2252        "alias": False,
    +2253        "db": False,
    +2254        "catalog": False,
    +2255        "laterals": False,
    +2256        "joins": False,
    +2257        "pivots": False,
    +2258        "hints": False,
    +2259        "system_time": False,
    +2260    }
    +2261
    +2262    @property
    +2263    def db(self) -> str:
    +2264        return self.text("db")
    +2265
    +2266    @property
    +2267    def catalog(self) -> str:
    +2268        return self.text("catalog")
    +2269
    +2270    @property
    +2271    def parts(self) -> t.List[Identifier]:
    +2272        """Return the parts of a table in order catalog, db, table."""
    +2273        return [
    +2274            t.cast(Identifier, self.args[part])
    +2275            for part in ("catalog", "db", "this")
    +2276            if self.args.get(part)
    +2277        ]
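An illustrative sketch of the `Table` accessors above; it assumes the `exp.to_table` helper (defined elsewhere in sqlglot.expressions, not in this hunk) to build a three-part table reference:

    from sqlglot import exp

    table = exp.to_table("some_catalog.some_db.some_table")  # assumed helper

    print(table.catalog)  # some_catalog
    print(table.db)       # some_db

    # parts returns the Identifier nodes in catalog, db, table order,
    # skipping any level that is not present on the expression.
    print([part.name for part in table.parts])
    # ['some_catalog', 'some_db', 'some_table']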
     
    @@ -23201,12 +23437,12 @@ Otherwise, this resets the expressions.
    -
    2270class SystemTime(Expression):
    -2271    arg_types = {
    -2272        "this": False,
    -2273        "expression": False,
    -2274        "kind": True,
    -2275    }
    +            
    2281class SystemTime(Expression):
    +2282    arg_types = {
    +2283        "this": False,
    +2284        "expression": False,
    +2285        "kind": True,
    +2286    }
     
    @@ -23270,96 +23506,96 @@ Otherwise, this resets the expressions.
    -
    2278class Union(Subqueryable):
    -2279    arg_types = {
    -2280        "with": False,
    -2281        "this": True,
    -2282        "expression": True,
    -2283        "distinct": False,
    -2284        **QUERY_MODIFIERS,
    -2285    }
    -2286
    -2287    def limit(
    -2288        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    -2289    ) -> Select:
    -2290        """
    -2291        Set the LIMIT expression.
    -2292
    -2293        Example:
    -2294            >>> select("1").union(select("1")).limit(1).sql()
    -2295            'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'
    -2296
    -2297        Args:
    -2298            expression: the SQL code string to parse.
    -2299                This can also be an integer.
    -2300                If a `Limit` instance is passed, this is used as-is.
    -2301                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
    -2302            dialect: the dialect used to parse the input expression.
    -2303            copy: if `False`, modify this expression instance in-place.
    -2304            opts: other options to use to parse the input expressions.
    -2305
    -2306        Returns:
    -2307            The limited subqueryable.
    -2308        """
    -2309        return (
    -2310            select("*")
    -2311            .from_(self.subquery(alias="_l_0", copy=copy))
    -2312            .limit(expression, dialect=dialect, copy=False, **opts)
    -2313        )
    -2314
    -2315    def select(
    -2316        self,
    -2317        *expressions: t.Optional[ExpOrStr],
    -2318        append: bool = True,
    -2319        dialect: DialectType = None,
    -2320        copy: bool = True,
    -2321        **opts,
    -2322    ) -> Union:
    -2323        """Append to or set the SELECT of the union recursively.
    -2324
    -2325        Example:
    -2326            >>> from sqlglot import parse_one
    -2327            >>> parse_one("select a from x union select a from y union select a from z").select("b").sql()
    -2328            'SELECT a, b FROM x UNION SELECT a, b FROM y UNION SELECT a, b FROM z'
    -2329
    -2330        Args:
    -2331            *expressions: the SQL code strings to parse.
    -2332                If an `Expression` instance is passed, it will be used as-is.
    -2333            append: if `True`, add to any existing expressions.
    -2334                Otherwise, this resets the expressions.
    -2335            dialect: the dialect used to parse the input expressions.
    -2336            copy: if `False`, modify this expression instance in-place.
    -2337            opts: other options to use to parse the input expressions.
    -2338
    -2339        Returns:
    -2340            Union: the modified expression.
    -2341        """
    -2342        this = self.copy() if copy else self
    -2343        this.this.unnest().select(*expressions, append=append, dialect=dialect, copy=False, **opts)
    -2344        this.expression.unnest().select(
    -2345            *expressions, append=append, dialect=dialect, copy=False, **opts
    -2346        )
    -2347        return this
    -2348
    -2349    @property
    -2350    def named_selects(self):
    -2351        return self.this.unnest().named_selects
    -2352
    -2353    @property
    -2354    def is_star(self) -> bool:
    -2355        return self.this.is_star or self.expression.is_star
    -2356
    -2357    @property
    -2358    def selects(self):
    -2359        return self.this.unnest().selects
    -2360
    -2361    @property
    -2362    def left(self):
    -2363        return self.this
    -2364
    -2365    @property
    -2366    def right(self):
    -2367        return self.expression
    +            
    2289class Union(Subqueryable):
    +2290    arg_types = {
    +2291        "with": False,
    +2292        "this": True,
    +2293        "expression": True,
    +2294        "distinct": False,
    +2295        **QUERY_MODIFIERS,
    +2296    }
    +2297
    +2298    def limit(
    +2299        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    +2300    ) -> Select:
    +2301        """
    +2302        Set the LIMIT expression.
    +2303
    +2304        Example:
    +2305            >>> select("1").union(select("1")).limit(1).sql()
    +2306            'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'
    +2307
    +2308        Args:
    +2309            expression: the SQL code string to parse.
    +2310                This can also be an integer.
    +2311                If a `Limit` instance is passed, this is used as-is.
    +2312                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
    +2313            dialect: the dialect used to parse the input expression.
    +2314            copy: if `False`, modify this expression instance in-place.
    +2315            opts: other options to use to parse the input expressions.
    +2316
    +2317        Returns:
    +2318            The limited subqueryable.
    +2319        """
    +2320        return (
    +2321            select("*")
    +2322            .from_(self.subquery(alias="_l_0", copy=copy))
    +2323            .limit(expression, dialect=dialect, copy=False, **opts)
    +2324        )
    +2325
    +2326    def select(
    +2327        self,
    +2328        *expressions: t.Optional[ExpOrStr],
    +2329        append: bool = True,
    +2330        dialect: DialectType = None,
    +2331        copy: bool = True,
    +2332        **opts,
    +2333    ) -> Union:
    +2334        """Append to or set the SELECT of the union recursively.
    +2335
    +2336        Example:
    +2337            >>> from sqlglot import parse_one
    +2338            >>> parse_one("select a from x union select a from y union select a from z").select("b").sql()
    +2339            'SELECT a, b FROM x UNION SELECT a, b FROM y UNION SELECT a, b FROM z'
    +2340
    +2341        Args:
    +2342            *expressions: the SQL code strings to parse.
    +2343                If an `Expression` instance is passed, it will be used as-is.
    +2344            append: if `True`, add to any existing expressions.
    +2345                Otherwise, this resets the expressions.
    +2346            dialect: the dialect used to parse the input expressions.
    +2347            copy: if `False`, modify this expression instance in-place.
    +2348            opts: other options to use to parse the input expressions.
    +2349
    +2350        Returns:
    +2351            Union: the modified expression.
    +2352        """
    +2353        this = self.copy() if copy else self
    +2354        this.this.unnest().select(*expressions, append=append, dialect=dialect, copy=False, **opts)
    +2355        this.expression.unnest().select(
    +2356            *expressions, append=append, dialect=dialect, copy=False, **opts
    +2357        )
    +2358        return this
    +2359
    +2360    @property
    +2361    def named_selects(self):
    +2362        return self.this.unnest().named_selects
    +2363
    +2364    @property
    +2365    def is_star(self) -> bool:
    +2366        return self.this.is_star or self.expression.is_star
    +2367
    +2368    @property
    +2369    def selects(self):
    +2370        return self.this.unnest().selects
    +2371
    +2372    @property
    +2373    def left(self):
    +2374        return self.this
    +2375
    +2376    @property
    +2377    def right(self):
    +2378        return self.expression
     
    @@ -23376,33 +23612,33 @@ Otherwise, this resets the expressions.
    -
    2287    def limit(
    -2288        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    -2289    ) -> Select:
    -2290        """
    -2291        Set the LIMIT expression.
    -2292
    -2293        Example:
    -2294            >>> select("1").union(select("1")).limit(1).sql()
    -2295            'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'
    -2296
    -2297        Args:
    -2298            expression: the SQL code string to parse.
    -2299                This can also be an integer.
    -2300                If a `Limit` instance is passed, this is used as-is.
    -2301                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
    -2302            dialect: the dialect used to parse the input expression.
    -2303            copy: if `False`, modify this expression instance in-place.
    -2304            opts: other options to use to parse the input expressions.
    -2305
    -2306        Returns:
    -2307            The limited subqueryable.
    -2308        """
    -2309        return (
    -2310            select("*")
    -2311            .from_(self.subquery(alias="_l_0", copy=copy))
    -2312            .limit(expression, dialect=dialect, copy=False, **opts)
    -2313        )
    +            
    2298    def limit(
    +2299        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    +2300    ) -> Select:
    +2301        """
    +2302        Set the LIMIT expression.
    +2303
    +2304        Example:
    +2305            >>> select("1").union(select("1")).limit(1).sql()
    +2306            'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'
    +2307
    +2308        Args:
    +2309            expression: the SQL code string to parse.
    +2310                This can also be an integer.
    +2311                If a `Limit` instance is passed, this is used as-is.
    +2312                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
    +2313            dialect: the dialect used to parse the input expression.
    +2314            copy: if `False`, modify this expression instance in-place.
    +2315            opts: other options to use to parse the input expressions.
    +2316
    +2317        Returns:
    +2318            The limited subqueryable.
    +2319        """
    +2320        return (
    +2321            select("*")
    +2322            .from_(self.subquery(alias="_l_0", copy=copy))
    +2323            .limit(expression, dialect=dialect, copy=False, **opts)
    +2324        )
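As the implementation above shows, `Union.limit` does not attach a LIMIT to the union itself; it wraps the union in a subquery aliased `_l_0` and limits the outer SELECT:

    from sqlglot import select

    union = select("1").union(select("2"))
    print(union.limit(1).sql())
    # SELECT * FROM (SELECT 1 UNION SELECT 2) AS _l_0 LIMIT 1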
     
    @@ -23450,39 +23686,39 @@ If another Expression instance is passed,
    -
    2315    def select(
    -2316        self,
    -2317        *expressions: t.Optional[ExpOrStr],
    -2318        append: bool = True,
    -2319        dialect: DialectType = None,
    -2320        copy: bool = True,
    -2321        **opts,
    -2322    ) -> Union:
    -2323        """Append to or set the SELECT of the union recursively.
    -2324
    -2325        Example:
    -2326            >>> from sqlglot import parse_one
    -2327            >>> parse_one("select a from x union select a from y union select a from z").select("b").sql()
    -2328            'SELECT a, b FROM x UNION SELECT a, b FROM y UNION SELECT a, b FROM z'
    -2329
    -2330        Args:
    -2331            *expressions: the SQL code strings to parse.
    -2332                If an `Expression` instance is passed, it will be used as-is.
    -2333            append: if `True`, add to any existing expressions.
    -2334                Otherwise, this resets the expressions.
    -2335            dialect: the dialect used to parse the input expressions.
    -2336            copy: if `False`, modify this expression instance in-place.
    -2337            opts: other options to use to parse the input expressions.
    -2338
    -2339        Returns:
    -2340            Union: the modified expression.
    -2341        """
    -2342        this = self.copy() if copy else self
    -2343        this.this.unnest().select(*expressions, append=append, dialect=dialect, copy=False, **opts)
    -2344        this.expression.unnest().select(
    -2345            *expressions, append=append, dialect=dialect, copy=False, **opts
    -2346        )
    -2347        return this
    +            
    2326    def select(
    +2327        self,
    +2328        *expressions: t.Optional[ExpOrStr],
    +2329        append: bool = True,
    +2330        dialect: DialectType = None,
    +2331        copy: bool = True,
    +2332        **opts,
    +2333    ) -> Union:
    +2334        """Append to or set the SELECT of the union recursively.
    +2335
    +2336        Example:
    +2337            >>> from sqlglot import parse_one
    +2338            >>> parse_one("select a from x union select a from y union select a from z").select("b").sql()
    +2339            'SELECT a, b FROM x UNION SELECT a, b FROM y UNION SELECT a, b FROM z'
    +2340
    +2341        Args:
    +2342            *expressions: the SQL code strings to parse.
    +2343                If an `Expression` instance is passed, it will be used as-is.
    +2344            append: if `True`, add to any existing expressions.
    +2345                Otherwise, this resets the expressions.
    +2346            dialect: the dialect used to parse the input expressions.
    +2347            copy: if `False`, modify this expression instance in-place.
    +2348            opts: other options to use to parse the input expressions.
    +2349
    +2350        Returns:
    +2351            Union: the modified expression.
    +2352        """
    +2353        this = self.copy() if copy else self
    +2354        this.this.unnest().select(*expressions, append=append, dialect=dialect, copy=False, **opts)
    +2355        this.expression.unnest().select(
    +2356            *expressions, append=append, dialect=dialect, copy=False, **opts
    +2357        )
    +2358        return this
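A small sketch of the append flag described above (not part of the patch): with append=False the projection list on every side of the union is replaced rather than extended.

    from sqlglot import parse_one

    union = parse_one("SELECT a FROM x UNION SELECT b FROM y")
    print(union.select("c", append=False).sql())
    # expected: SELECT c FROM x UNION SELECT c FROM y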
     
    @@ -23601,8 +23837,8 @@ Otherwise, this resets the expressions.
    -
    2370class Except(Union):
    -2371    pass
    +            
    2381class Except(Union):
    +2382    pass
     
    @@ -23682,8 +23918,8 @@ Otherwise, this resets the expressions.
    -
    2374class Intersect(Union):
    -2375    pass
    +            
    2385class Intersect(Union):
    +2386    pass
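Because Except and Intersect subclass Union, they pick up the same recursive select builder. A hedged sketch, not part of the patch:

    from sqlglot import exp, parse_one

    q = parse_one("SELECT a FROM x EXCEPT SELECT a FROM y")
    print(isinstance(q, exp.Except), isinstance(q, exp.Union))  # True True
    print(q.select("b").sql())
    # expected: SELECT a, b FROM x EXCEPT SELECT a, b FROM y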
     
    @@ -23763,13 +23999,13 @@ Otherwise, this resets the expressions.
    -
    2378class Unnest(UDTF):
    -2379    arg_types = {
    -2380        "expressions": True,
    -2381        "ordinality": False,
    -2382        "alias": False,
    -2383        "offset": False,
    -2384    }
    +            
    2389class Unnest(UDTF):
    +2390    arg_types = {
    +2391        "expressions": True,
    +2392        "ordinality": False,
    +2393        "alias": False,
    +2394        "offset": False,
    +2395    }
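A short sketch of how an Unnest node and its arg_types show up after parsing (not part of the patch; the Presto input is an assumption):

    from sqlglot import exp, parse_one

    node = parse_one("SELECT * FROM UNNEST(ARRAY[1, 2]) AS t(x)", read="presto").find(exp.Unnest)
    print(node.args["expressions"])  # the unnested value(s); "expressions" is the only required arg
    print(node.args.get("alias"))    # the optional table alias t(x), if the parse attaches one here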
     
    @@ -23839,15 +24075,15 @@ Otherwise, this resets the expressions.
    -
    2387class Update(Expression):
    -2388    arg_types = {
    -2389        "with": False,
    -2390        "this": False,
    -2391        "expressions": True,
    -2392        "from": False,
    -2393        "where": False,
    -2394        "returning": False,
    -2395    }
    +            
    2398class Update(Expression):
    +2399    arg_types = {
    +2400        "with": False,
    +2401        "this": False,
    +2402        "expressions": True,
    +2403        "from": False,
    +2404        "where": False,
    +2405        "returning": False,
    +2406    }
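A minimal sketch showing how the Update arg_types map onto a parsed statement (not part of the patch):

    from sqlglot import exp, parse_one

    upd = parse_one("UPDATE t SET x = 1, y = 2 WHERE z = 3")
    print(isinstance(upd, exp.Update))         # True
    print([e.sql() for e in upd.expressions])  # ['x = 1', 'y = 2'] -- the SET assignments
    print(upd.args["where"].sql())             # WHERE z = 3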
     
    @@ -23911,12 +24147,12 @@ Otherwise, this resets the expressions.
    -
    2398class Values(UDTF):
    -2399    arg_types = {
    -2400        "expressions": True,
    -2401        "ordinality": False,
    -2402        "alias": False,
    -2403    }
    +            
    2409class Values(UDTF):
    +2410    arg_types = {
    +2411        "expressions": True,
    +2412        "ordinality": False,
    +2413        "alias": False,
    +2414    }
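A sketch of Values in practice (not part of the patch): each row parses as a Tuple under "expressions".

    from sqlglot import exp, parse_one

    vals = parse_one("SELECT * FROM (VALUES (1, 'a'), (2, 'b')) AS t(id, name)").find(exp.Values)
    print(len(vals.expressions))      # 2 -- one Tuple per row
    print(vals.expressions[0].sql())  # (1, 'a')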
     
    @@ -23986,8 +24222,8 @@ Otherwise, this resets the expressions.
    -
    2406class Var(Expression):
    -2407    pass
    +            
    2417class Var(Expression):
    +2418    pass
     
    @@ -24051,8 +24287,8 @@ Otherwise, this resets the expressions.
    -
    2410class Schema(Expression):
    -2411    arg_types = {"this": False, "expressions": False}
    +            
    2421class Schema(Expression):
    +2422    arg_types = {"this": False, "expressions": False}
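A sketch of where Schema appears (not part of the patch): the column list of a CREATE TABLE is parsed into a Schema node.

    from sqlglot import exp, parse_one

    create = parse_one("CREATE TABLE t (a INT, b TEXT)")
    schema = create.this
    print(isinstance(schema, exp.Schema))             # True
    print([col.sql() for col in schema.expressions])  # ['a INT', 'b TEXT']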
     
    @@ -24116,8 +24352,8 @@ Otherwise, this resets the expressions.
    -
    2416class Lock(Expression):
    -2417    arg_types = {"update": True, "expressions": False, "wait": False}
    +            
    2427class Lock(Expression):
    +2428    arg_types = {"update": True, "expressions": False, "wait": False}
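A sketch of Lock in use (not part of the patch), mirroring the Select.lock docstring further down:

    from sqlglot import select

    print(select("x").from_("tbl").lock().sql("mysql"))              # SELECT x FROM tbl FOR UPDATE
    print(select("x").from_("tbl").lock(update=False).sql("mysql"))  # SELECT x FROM tbl FOR SHARE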
     
    @@ -24181,673 +24417,696 @@ Otherwise, this resets the expressions.
    -
    2420class Select(Subqueryable):
    -2421    arg_types = {
    -2422        "with": False,
    -2423        "kind": False,
    -2424        "expressions": False,
    -2425        "hint": False,
    -2426        "distinct": False,
    -2427        "struct": False,  # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#return_query_results_as_a_value_table
    -2428        "value": False,
    -2429        "into": False,
    -2430        "from": False,
    -2431        **QUERY_MODIFIERS,
    -2432    }
    -2433
    -2434    def from_(
    -2435        self, expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts
    -2436    ) -> Select:
    -2437        """
    -2438        Set the FROM expression.
    -2439
    -2440        Example:
    -2441            >>> Select().from_("tbl").select("x").sql()
    -2442            'SELECT x FROM tbl'
    -2443
    -2444        Args:
    -2445            expression : the SQL code strings to parse.
    -2446                If a `From` instance is passed, this is used as-is.
    -2447                If another `Expression` instance is passed, it will be wrapped in a `From`.
    -2448            dialect: the dialect used to parse the input expression.
    -2449            copy: if `False`, modify this expression instance in-place.
    -2450            opts: other options to use to parse the input expressions.
    -2451
    -2452        Returns:
    -2453            The modified Select expression.
    -2454        """
    -2455        return _apply_builder(
    -2456            expression=expression,
    -2457            instance=self,
    -2458            arg="from",
    -2459            into=From,
    -2460            prefix="FROM",
    -2461            dialect=dialect,
    -2462            copy=copy,
    -2463            **opts,
    -2464        )
    -2465
    -2466    def group_by(
    -2467        self,
    -2468        *expressions: t.Optional[ExpOrStr],
    -2469        append: bool = True,
    -2470        dialect: DialectType = None,
    -2471        copy: bool = True,
    -2472        **opts,
    -2473    ) -> Select:
    -2474        """
    -2475        Set the GROUP BY expression.
    +            
    2431class Select(Subqueryable):
    +2432    arg_types = {
    +2433        "with": False,
    +2434        "kind": False,
    +2435        "expressions": False,
    +2436        "hint": False,
    +2437        "distinct": False,
    +2438        "struct": False,  # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#return_query_results_as_a_value_table
    +2439        "value": False,
    +2440        "into": False,
    +2441        "from": False,
    +2442        **QUERY_MODIFIERS,
    +2443    }
    +2444
    +2445    def from_(
    +2446        self, expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts
    +2447    ) -> Select:
    +2448        """
    +2449        Set the FROM expression.
    +2450
    +2451        Example:
    +2452            >>> Select().from_("tbl").select("x").sql()
    +2453            'SELECT x FROM tbl'
    +2454
    +2455        Args:
+2456            expression: the SQL code string to parse.
    +2457                If a `From` instance is passed, this is used as-is.
    +2458                If another `Expression` instance is passed, it will be wrapped in a `From`.
    +2459            dialect: the dialect used to parse the input expression.
    +2460            copy: if `False`, modify this expression instance in-place.
    +2461            opts: other options to use to parse the input expressions.
    +2462
    +2463        Returns:
    +2464            The modified Select expression.
    +2465        """
    +2466        return _apply_builder(
    +2467            expression=expression,
    +2468            instance=self,
    +2469            arg="from",
    +2470            into=From,
    +2471            prefix="FROM",
    +2472            dialect=dialect,
    +2473            copy=copy,
    +2474            **opts,
    +2475        )
     2476
    -2477        Example:
    -2478            >>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()
    -2479            'SELECT x, COUNT(1) FROM tbl GROUP BY x'
    -2480
    -2481        Args:
    -2482            *expressions: the SQL code strings to parse.
    -2483                If a `Group` instance is passed, this is used as-is.
    -2484                If another `Expression` instance is passed, it will be wrapped in a `Group`.
    -2485                If nothing is passed in then a group by is not applied to the expression
    -2486            append: if `True`, add to any existing expressions.
    -2487                Otherwise, this flattens all the `Group` expression into a single expression.
    -2488            dialect: the dialect used to parse the input expression.
    -2489            copy: if `False`, modify this expression instance in-place.
    -2490            opts: other options to use to parse the input expressions.
    +2477    def group_by(
    +2478        self,
    +2479        *expressions: t.Optional[ExpOrStr],
    +2480        append: bool = True,
    +2481        dialect: DialectType = None,
    +2482        copy: bool = True,
    +2483        **opts,
    +2484    ) -> Select:
    +2485        """
    +2486        Set the GROUP BY expression.
    +2487
    +2488        Example:
    +2489            >>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()
    +2490            'SELECT x, COUNT(1) FROM tbl GROUP BY x'
     2491
    -2492        Returns:
    -2493            The modified Select expression.
    -2494        """
    -2495        if not expressions:
    -2496            return self if not copy else self.copy()
    -2497
    -2498        return _apply_child_list_builder(
    -2499            *expressions,
    -2500            instance=self,
    -2501            arg="group",
    -2502            append=append,
    -2503            copy=copy,
    -2504            prefix="GROUP BY",
    -2505            into=Group,
    -2506            dialect=dialect,
    -2507            **opts,
    -2508        )
    -2509
    -2510    def order_by(
    -2511        self,
    -2512        *expressions: t.Optional[ExpOrStr],
    -2513        append: bool = True,
    -2514        dialect: DialectType = None,
    -2515        copy: bool = True,
    -2516        **opts,
    -2517    ) -> Select:
    -2518        """
    -2519        Set the ORDER BY expression.
    +2492        Args:
    +2493            *expressions: the SQL code strings to parse.
    +2494                If a `Group` instance is passed, this is used as-is.
    +2495                If another `Expression` instance is passed, it will be wrapped in a `Group`.
+2496                If nothing is passed in, then a GROUP BY is not applied to the expression.
+2497            append: if `True`, add to any existing expressions.
+2498                Otherwise, this flattens all the `Group` expressions into a single expression.
    +2499            dialect: the dialect used to parse the input expression.
    +2500            copy: if `False`, modify this expression instance in-place.
    +2501            opts: other options to use to parse the input expressions.
    +2502
    +2503        Returns:
    +2504            The modified Select expression.
    +2505        """
    +2506        if not expressions:
    +2507            return self if not copy else self.copy()
    +2508
    +2509        return _apply_child_list_builder(
    +2510            *expressions,
    +2511            instance=self,
    +2512            arg="group",
    +2513            append=append,
    +2514            copy=copy,
    +2515            prefix="GROUP BY",
    +2516            into=Group,
    +2517            dialect=dialect,
    +2518            **opts,
    +2519        )
     2520
    -2521        Example:
    -2522            >>> Select().from_("tbl").select("x").order_by("x DESC").sql()
    -2523            'SELECT x FROM tbl ORDER BY x DESC'
    -2524
    -2525        Args:
    -2526            *expressions: the SQL code strings to parse.
    -2527                If a `Group` instance is passed, this is used as-is.
    -2528                If another `Expression` instance is passed, it will be wrapped in a `Order`.
    -2529            append: if `True`, add to any existing expressions.
    -2530                Otherwise, this flattens all the `Order` expression into a single expression.
    -2531            dialect: the dialect used to parse the input expression.
    -2532            copy: if `False`, modify this expression instance in-place.
    -2533            opts: other options to use to parse the input expressions.
    -2534
    -2535        Returns:
    -2536            The modified Select expression.
    -2537        """
    -2538        return _apply_child_list_builder(
    -2539            *expressions,
    -2540            instance=self,
    -2541            arg="order",
    -2542            append=append,
    -2543            copy=copy,
    -2544            prefix="ORDER BY",
    -2545            into=Order,
    -2546            dialect=dialect,
    -2547            **opts,
    -2548        )
    -2549
    -2550    def sort_by(
    -2551        self,
    -2552        *expressions: t.Optional[ExpOrStr],
    -2553        append: bool = True,
    -2554        dialect: DialectType = None,
    -2555        copy: bool = True,
    -2556        **opts,
    -2557    ) -> Select:
    -2558        """
    -2559        Set the SORT BY expression.
    +2521    def order_by(
    +2522        self,
    +2523        *expressions: t.Optional[ExpOrStr],
    +2524        append: bool = True,
    +2525        dialect: DialectType = None,
    +2526        copy: bool = True,
    +2527        **opts,
    +2528    ) -> Select:
    +2529        """
    +2530        Set the ORDER BY expression.
    +2531
    +2532        Example:
    +2533            >>> Select().from_("tbl").select("x").order_by("x DESC").sql()
    +2534            'SELECT x FROM tbl ORDER BY x DESC'
    +2535
    +2536        Args:
    +2537            *expressions: the SQL code strings to parse.
    +2538                If a `Group` instance is passed, this is used as-is.
+2539                If another `Expression` instance is passed, it will be wrapped in an `Order`.
+2540            append: if `True`, add to any existing expressions.
+2541                Otherwise, this flattens all the `Order` expressions into a single expression.
    +2542            dialect: the dialect used to parse the input expression.
    +2543            copy: if `False`, modify this expression instance in-place.
    +2544            opts: other options to use to parse the input expressions.
    +2545
    +2546        Returns:
    +2547            The modified Select expression.
    +2548        """
    +2549        return _apply_child_list_builder(
    +2550            *expressions,
    +2551            instance=self,
    +2552            arg="order",
    +2553            append=append,
    +2554            copy=copy,
    +2555            prefix="ORDER BY",
    +2556            into=Order,
    +2557            dialect=dialect,
    +2558            **opts,
    +2559        )
     2560
    -2561        Example:
    -2562            >>> Select().from_("tbl").select("x").sort_by("x DESC").sql(dialect="hive")
    -2563            'SELECT x FROM tbl SORT BY x DESC'
    -2564
    -2565        Args:
    -2566            *expressions: the SQL code strings to parse.
    -2567                If a `Group` instance is passed, this is used as-is.
    -2568                If another `Expression` instance is passed, it will be wrapped in a `SORT`.
    -2569            append: if `True`, add to any existing expressions.
    -2570                Otherwise, this flattens all the `Order` expression into a single expression.
    -2571            dialect: the dialect used to parse the input expression.
    -2572            copy: if `False`, modify this expression instance in-place.
    -2573            opts: other options to use to parse the input expressions.
    -2574
    -2575        Returns:
    -2576            The modified Select expression.
    -2577        """
    -2578        return _apply_child_list_builder(
    -2579            *expressions,
    -2580            instance=self,
    -2581            arg="sort",
    -2582            append=append,
    -2583            copy=copy,
    -2584            prefix="SORT BY",
    -2585            into=Sort,
    -2586            dialect=dialect,
    -2587            **opts,
    -2588        )
    -2589
    -2590    def cluster_by(
    -2591        self,
    -2592        *expressions: t.Optional[ExpOrStr],
    -2593        append: bool = True,
    -2594        dialect: DialectType = None,
    -2595        copy: bool = True,
    -2596        **opts,
    -2597    ) -> Select:
    -2598        """
    -2599        Set the CLUSTER BY expression.
    +2561    def sort_by(
    +2562        self,
    +2563        *expressions: t.Optional[ExpOrStr],
    +2564        append: bool = True,
    +2565        dialect: DialectType = None,
    +2566        copy: bool = True,
    +2567        **opts,
    +2568    ) -> Select:
    +2569        """
    +2570        Set the SORT BY expression.
    +2571
    +2572        Example:
    +2573            >>> Select().from_("tbl").select("x").sort_by("x DESC").sql(dialect="hive")
    +2574            'SELECT x FROM tbl SORT BY x DESC'
    +2575
    +2576        Args:
    +2577            *expressions: the SQL code strings to parse.
    +2578                If a `Group` instance is passed, this is used as-is.
    +2579                If another `Expression` instance is passed, it will be wrapped in a `SORT`.
    +2580            append: if `True`, add to any existing expressions.
    +2581                Otherwise, this flattens all the `Order` expression into a single expression.
    +2582            dialect: the dialect used to parse the input expression.
    +2583            copy: if `False`, modify this expression instance in-place.
    +2584            opts: other options to use to parse the input expressions.
    +2585
    +2586        Returns:
    +2587            The modified Select expression.
    +2588        """
    +2589        return _apply_child_list_builder(
    +2590            *expressions,
    +2591            instance=self,
    +2592            arg="sort",
    +2593            append=append,
    +2594            copy=copy,
    +2595            prefix="SORT BY",
    +2596            into=Sort,
    +2597            dialect=dialect,
    +2598            **opts,
    +2599        )
     2600
    -2601        Example:
    -2602            >>> Select().from_("tbl").select("x").cluster_by("x DESC").sql(dialect="hive")
    -2603            'SELECT x FROM tbl CLUSTER BY x DESC'
    -2604
    -2605        Args:
    -2606            *expressions: the SQL code strings to parse.
    -2607                If a `Group` instance is passed, this is used as-is.
    -2608                If another `Expression` instance is passed, it will be wrapped in a `Cluster`.
    -2609            append: if `True`, add to any existing expressions.
    -2610                Otherwise, this flattens all the `Order` expression into a single expression.
    -2611            dialect: the dialect used to parse the input expression.
    -2612            copy: if `False`, modify this expression instance in-place.
    -2613            opts: other options to use to parse the input expressions.
    -2614
    -2615        Returns:
    -2616            The modified Select expression.
    -2617        """
    -2618        return _apply_child_list_builder(
    -2619            *expressions,
    -2620            instance=self,
    -2621            arg="cluster",
    -2622            append=append,
    -2623            copy=copy,
    -2624            prefix="CLUSTER BY",
    -2625            into=Cluster,
    -2626            dialect=dialect,
    -2627            **opts,
    -2628        )
    -2629
    -2630    def limit(
    -2631        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    -2632    ) -> Select:
    -2633        """
    -2634        Set the LIMIT expression.
    -2635
    -2636        Example:
    -2637            >>> Select().from_("tbl").select("x").limit(10).sql()
    -2638            'SELECT x FROM tbl LIMIT 10'
    -2639
    -2640        Args:
    -2641            expression: the SQL code string to parse.
    -2642                This can also be an integer.
    -2643                If a `Limit` instance is passed, this is used as-is.
    -2644                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
    -2645            dialect: the dialect used to parse the input expression.
    -2646            copy: if `False`, modify this expression instance in-place.
    -2647            opts: other options to use to parse the input expressions.
    -2648
    -2649        Returns:
    -2650            Select: the modified expression.
    -2651        """
    -2652        return _apply_builder(
    -2653            expression=expression,
    -2654            instance=self,
    -2655            arg="limit",
    -2656            into=Limit,
    -2657            prefix="LIMIT",
    -2658            dialect=dialect,
    -2659            copy=copy,
    -2660            **opts,
    -2661        )
    -2662
    -2663    def offset(
    -2664        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    -2665    ) -> Select:
    -2666        """
    -2667        Set the OFFSET expression.
    -2668
    -2669        Example:
    -2670            >>> Select().from_("tbl").select("x").offset(10).sql()
    -2671            'SELECT x FROM tbl OFFSET 10'
    -2672
    -2673        Args:
    -2674            expression: the SQL code string to parse.
    -2675                This can also be an integer.
    -2676                If a `Offset` instance is passed, this is used as-is.
    -2677                If another `Expression` instance is passed, it will be wrapped in a `Offset`.
    -2678            dialect: the dialect used to parse the input expression.
    -2679            copy: if `False`, modify this expression instance in-place.
    -2680            opts: other options to use to parse the input expressions.
    -2681
    -2682        Returns:
    -2683            The modified Select expression.
    -2684        """
    -2685        return _apply_builder(
    -2686            expression=expression,
    -2687            instance=self,
    -2688            arg="offset",
    -2689            into=Offset,
    -2690            prefix="OFFSET",
    -2691            dialect=dialect,
    -2692            copy=copy,
    -2693            **opts,
    -2694        )
    -2695
    -2696    def select(
    -2697        self,
    -2698        *expressions: t.Optional[ExpOrStr],
    -2699        append: bool = True,
    -2700        dialect: DialectType = None,
    -2701        copy: bool = True,
    -2702        **opts,
    -2703    ) -> Select:
    -2704        """
    -2705        Append to or set the SELECT expressions.
    +2601    def cluster_by(
    +2602        self,
    +2603        *expressions: t.Optional[ExpOrStr],
    +2604        append: bool = True,
    +2605        dialect: DialectType = None,
    +2606        copy: bool = True,
    +2607        **opts,
    +2608    ) -> Select:
    +2609        """
    +2610        Set the CLUSTER BY expression.
    +2611
    +2612        Example:
    +2613            >>> Select().from_("tbl").select("x").cluster_by("x DESC").sql(dialect="hive")
    +2614            'SELECT x FROM tbl CLUSTER BY x DESC'
    +2615
    +2616        Args:
    +2617            *expressions: the SQL code strings to parse.
    +2618                If a `Group` instance is passed, this is used as-is.
    +2619                If another `Expression` instance is passed, it will be wrapped in a `Cluster`.
    +2620            append: if `True`, add to any existing expressions.
    +2621                Otherwise, this flattens all the `Order` expression into a single expression.
    +2622            dialect: the dialect used to parse the input expression.
    +2623            copy: if `False`, modify this expression instance in-place.
    +2624            opts: other options to use to parse the input expressions.
    +2625
    +2626        Returns:
    +2627            The modified Select expression.
    +2628        """
    +2629        return _apply_child_list_builder(
    +2630            *expressions,
    +2631            instance=self,
    +2632            arg="cluster",
    +2633            append=append,
    +2634            copy=copy,
    +2635            prefix="CLUSTER BY",
    +2636            into=Cluster,
    +2637            dialect=dialect,
    +2638            **opts,
    +2639        )
    +2640
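# Illustrative sketch, not part of the patch: the *_by builders above share the
# same child-list semantics -- repeated calls append, and the expected output
# below is an assumption.
from sqlglot import select

q = select("x", "SUM(y)").from_("tbl").group_by("x").order_by("SUM(y) DESC")
print(q.sql())                # SELECT x, SUM(y) FROM tbl GROUP BY x ORDER BY SUM(y) DESC
print(q.group_by("z").sql())  # SELECT x, SUM(y) FROM tbl GROUP BY x, z ORDER BY SUM(y) DESC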
    +2641    def limit(
    +2642        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    +2643    ) -> Select:
    +2644        """
    +2645        Set the LIMIT expression.
    +2646
    +2647        Example:
    +2648            >>> Select().from_("tbl").select("x").limit(10).sql()
    +2649            'SELECT x FROM tbl LIMIT 10'
    +2650
    +2651        Args:
    +2652            expression: the SQL code string to parse.
    +2653                This can also be an integer.
    +2654                If a `Limit` instance is passed, this is used as-is.
    +2655                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
    +2656            dialect: the dialect used to parse the input expression.
    +2657            copy: if `False`, modify this expression instance in-place.
    +2658            opts: other options to use to parse the input expressions.
    +2659
    +2660        Returns:
    +2661            Select: the modified expression.
    +2662        """
    +2663        return _apply_builder(
    +2664            expression=expression,
    +2665            instance=self,
    +2666            arg="limit",
    +2667            into=Limit,
    +2668            prefix="LIMIT",
    +2669            dialect=dialect,
    +2670            copy=copy,
    +2671            **opts,
    +2672        )
    +2673
    +2674    def offset(
    +2675        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    +2676    ) -> Select:
    +2677        """
    +2678        Set the OFFSET expression.
    +2679
    +2680        Example:
    +2681            >>> Select().from_("tbl").select("x").offset(10).sql()
    +2682            'SELECT x FROM tbl OFFSET 10'
    +2683
    +2684        Args:
    +2685            expression: the SQL code string to parse.
    +2686                This can also be an integer.
    +2687                If a `Offset` instance is passed, this is used as-is.
    +2688                If another `Expression` instance is passed, it will be wrapped in a `Offset`.
    +2689            dialect: the dialect used to parse the input expression.
    +2690            copy: if `False`, modify this expression instance in-place.
    +2691            opts: other options to use to parse the input expressions.
    +2692
    +2693        Returns:
    +2694            The modified Select expression.
    +2695        """
    +2696        return _apply_builder(
    +2697            expression=expression,
    +2698            instance=self,
    +2699            arg="offset",
    +2700            into=Offset,
    +2701            prefix="OFFSET",
    +2702            dialect=dialect,
    +2703            copy=copy,
    +2704            **opts,
    +2705        )
     2706
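# Illustrative sketch, not part of the patch: limit() and offset() accept plain
# integers as documented above; the expected output is an assumption.
from sqlglot import select

print(select("x").from_("tbl").limit(10).offset(20).sql())
# SELECT x FROM tbl LIMIT 10 OFFSET 20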
    -2707        Example:
    -2708            >>> Select().select("x", "y").sql()
    -2709            'SELECT x, y'
    -2710
    -2711        Args:
    -2712            *expressions: the SQL code strings to parse.
    -2713                If an `Expression` instance is passed, it will be used as-is.
    -2714            append: if `True`, add to any existing expressions.
    -2715                Otherwise, this resets the expressions.
    -2716            dialect: the dialect used to parse the input expressions.
    -2717            copy: if `False`, modify this expression instance in-place.
    -2718            opts: other options to use to parse the input expressions.
    -2719
    -2720        Returns:
    -2721            The modified Select expression.
    -2722        """
    -2723        return _apply_list_builder(
    -2724            *expressions,
    -2725            instance=self,
    -2726            arg="expressions",
    -2727            append=append,
    -2728            dialect=dialect,
    -2729            copy=copy,
    -2730            **opts,
    -2731        )
    -2732
    -2733    def lateral(
    -2734        self,
    -2735        *expressions: t.Optional[ExpOrStr],
    -2736        append: bool = True,
    -2737        dialect: DialectType = None,
    -2738        copy: bool = True,
    -2739        **opts,
    -2740    ) -> Select:
    -2741        """
    -2742        Append to or set the LATERAL expressions.
    +2707    def select(
    +2708        self,
    +2709        *expressions: t.Optional[ExpOrStr],
    +2710        append: bool = True,
    +2711        dialect: DialectType = None,
    +2712        copy: bool = True,
    +2713        **opts,
    +2714    ) -> Select:
    +2715        """
    +2716        Append to or set the SELECT expressions.
    +2717
    +2718        Example:
    +2719            >>> Select().select("x", "y").sql()
    +2720            'SELECT x, y'
    +2721
    +2722        Args:
    +2723            *expressions: the SQL code strings to parse.
    +2724                If an `Expression` instance is passed, it will be used as-is.
    +2725            append: if `True`, add to any existing expressions.
    +2726                Otherwise, this resets the expressions.
    +2727            dialect: the dialect used to parse the input expressions.
    +2728            copy: if `False`, modify this expression instance in-place.
    +2729            opts: other options to use to parse the input expressions.
    +2730
    +2731        Returns:
    +2732            The modified Select expression.
    +2733        """
    +2734        return _apply_list_builder(
    +2735            *expressions,
    +2736            instance=self,
    +2737            arg="expressions",
    +2738            append=append,
    +2739            dialect=dialect,
    +2740            copy=copy,
    +2741            **opts,
    +2742        )
     2743
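# Illustrative sketch, not part of the patch: append=False replaces the current
# projection list instead of extending it.
from sqlglot import select

q = select("x", "y").from_("tbl")
print(q.select("z").sql())                # SELECT x, y, z FROM tbl
print(q.select("z", append=False).sql())  # SELECT z FROM tbl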
    -2744        Example:
    -2745            >>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()
    -2746            'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'
    -2747
    -2748        Args:
    -2749            *expressions: the SQL code strings to parse.
    -2750                If an `Expression` instance is passed, it will be used as-is.
    -2751            append: if `True`, add to any existing expressions.
    -2752                Otherwise, this resets the expressions.
    -2753            dialect: the dialect used to parse the input expressions.
    -2754            copy: if `False`, modify this expression instance in-place.
    -2755            opts: other options to use to parse the input expressions.
    -2756
    -2757        Returns:
    -2758            The modified Select expression.
    -2759        """
    -2760        return _apply_list_builder(
    -2761            *expressions,
    -2762            instance=self,
    -2763            arg="laterals",
    -2764            append=append,
    -2765            into=Lateral,
    -2766            prefix="LATERAL VIEW",
    -2767            dialect=dialect,
    -2768            copy=copy,
    -2769            **opts,
    -2770        )
    -2771
    -2772    def join(
    -2773        self,
    -2774        expression: ExpOrStr,
    -2775        on: t.Optional[ExpOrStr] = None,
    -2776        using: t.Optional[ExpOrStr | t.List[ExpOrStr]] = None,
    -2777        append: bool = True,
    -2778        join_type: t.Optional[str] = None,
    -2779        join_alias: t.Optional[Identifier | str] = None,
    -2780        dialect: DialectType = None,
    -2781        copy: bool = True,
    -2782        **opts,
    -2783    ) -> Select:
    -2784        """
    -2785        Append to or set the JOIN expressions.
    -2786
    -2787        Example:
    -2788            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()
    -2789            'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'
    -2790
    -2791            >>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()
    -2792            'SELECT 1 FROM a JOIN b USING (x, y, z)'
    -2793
    -2794            Use `join_type` to change the type of join:
    -2795
    -2796            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()
    -2797            'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'
    -2798
    -2799        Args:
    -2800            expression: the SQL code string to parse.
    -2801                If an `Expression` instance is passed, it will be used as-is.
    -2802            on: optionally specify the join "on" criteria as a SQL string.
    -2803                If an `Expression` instance is passed, it will be used as-is.
    -2804            using: optionally specify the join "using" criteria as a SQL string.
    -2805                If an `Expression` instance is passed, it will be used as-is.
    -2806            append: if `True`, add to any existing expressions.
    -2807                Otherwise, this resets the expressions.
    -2808            join_type: if set, alter the parsed join type.
    -2809            join_alias: an optional alias for the joined source.
    -2810            dialect: the dialect used to parse the input expressions.
    -2811            copy: if `False`, modify this expression instance in-place.
    -2812            opts: other options to use to parse the input expressions.
    -2813
    -2814        Returns:
    -2815            Select: the modified expression.
    -2816        """
    -2817        parse_args: t.Dict[str, t.Any] = {"dialect": dialect, **opts}
    -2818
    -2819        try:
    -2820            expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args)
    -2821        except ParseError:
    -2822            expression = maybe_parse(expression, into=(Join, Expression), **parse_args)
    -2823
    -2824        join = expression if isinstance(expression, Join) else Join(this=expression)
    -2825
    -2826        if isinstance(join.this, Select):
    -2827            join.this.replace(join.this.subquery())
    -2828
    -2829        if join_type:
    -2830            method: t.Optional[Token]
    -2831            side: t.Optional[Token]
    -2832            kind: t.Optional[Token]
    -2833
    -2834            method, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args)  # type: ignore
    -2835
    -2836            if method:
    -2837                join.set("method", method.text)
    -2838            if side:
    -2839                join.set("side", side.text)
    -2840            if kind:
    -2841                join.set("kind", kind.text)
    -2842
    -2843        if on:
    -2844            on = and_(*ensure_list(on), dialect=dialect, copy=copy, **opts)
    -2845            join.set("on", on)
    +2744    def lateral(
    +2745        self,
    +2746        *expressions: t.Optional[ExpOrStr],
    +2747        append: bool = True,
    +2748        dialect: DialectType = None,
    +2749        copy: bool = True,
    +2750        **opts,
    +2751    ) -> Select:
    +2752        """
    +2753        Append to or set the LATERAL expressions.
    +2754
    +2755        Example:
    +2756            >>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()
    +2757            'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'
    +2758
    +2759        Args:
    +2760            *expressions: the SQL code strings to parse.
    +2761                If an `Expression` instance is passed, it will be used as-is.
    +2762            append: if `True`, add to any existing expressions.
    +2763                Otherwise, this resets the expressions.
    +2764            dialect: the dialect used to parse the input expressions.
    +2765            copy: if `False`, modify this expression instance in-place.
    +2766            opts: other options to use to parse the input expressions.
    +2767
    +2768        Returns:
    +2769            The modified Select expression.
    +2770        """
    +2771        return _apply_list_builder(
    +2772            *expressions,
    +2773            instance=self,
    +2774            arg="laterals",
    +2775            append=append,
    +2776            into=Lateral,
    +2777            prefix="LATERAL VIEW",
    +2778            dialect=dialect,
    +2779            copy=copy,
    +2780            **opts,
    +2781        )
    +2782
    +2783    def join(
    +2784        self,
    +2785        expression: ExpOrStr,
    +2786        on: t.Optional[ExpOrStr] = None,
    +2787        using: t.Optional[ExpOrStr | t.List[ExpOrStr]] = None,
    +2788        append: bool = True,
    +2789        join_type: t.Optional[str] = None,
    +2790        join_alias: t.Optional[Identifier | str] = None,
    +2791        dialect: DialectType = None,
    +2792        copy: bool = True,
    +2793        **opts,
    +2794    ) -> Select:
    +2795        """
    +2796        Append to or set the JOIN expressions.
    +2797
    +2798        Example:
    +2799            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()
    +2800            'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'
    +2801
    +2802            >>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()
    +2803            'SELECT 1 FROM a JOIN b USING (x, y, z)'
    +2804
    +2805            Use `join_type` to change the type of join:
    +2806
    +2807            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()
    +2808            'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'
    +2809
    +2810        Args:
    +2811            expression: the SQL code string to parse.
    +2812                If an `Expression` instance is passed, it will be used as-is.
    +2813            on: optionally specify the join "on" criteria as a SQL string.
    +2814                If an `Expression` instance is passed, it will be used as-is.
    +2815            using: optionally specify the join "using" criteria as a SQL string.
    +2816                If an `Expression` instance is passed, it will be used as-is.
    +2817            append: if `True`, add to any existing expressions.
    +2818                Otherwise, this resets the expressions.
    +2819            join_type: if set, alter the parsed join type.
    +2820            join_alias: an optional alias for the joined source.
    +2821            dialect: the dialect used to parse the input expressions.
    +2822            copy: if `False`, modify this expression instance in-place.
    +2823            opts: other options to use to parse the input expressions.
    +2824
    +2825        Returns:
    +2826            Select: the modified expression.
    +2827        """
    +2828        parse_args: t.Dict[str, t.Any] = {"dialect": dialect, **opts}
    +2829
    +2830        try:
    +2831            expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args)
    +2832        except ParseError:
    +2833            expression = maybe_parse(expression, into=(Join, Expression), **parse_args)
    +2834
    +2835        join = expression if isinstance(expression, Join) else Join(this=expression)
    +2836
    +2837        if isinstance(join.this, Select):
    +2838            join.this.replace(join.this.subquery())
    +2839
    +2840        if join_type:
    +2841            method: t.Optional[Token]
    +2842            side: t.Optional[Token]
    +2843            kind: t.Optional[Token]
    +2844
    +2845            method, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args)  # type: ignore
     2846
    -2847        if using:
    -2848            join = _apply_list_builder(
    -2849                *ensure_list(using),
    -2850                instance=join,
    -2851                arg="using",
    -2852                append=append,
    -2853                copy=copy,
    -2854                **opts,
    -2855            )
    -2856
    -2857        if join_alias:
    -2858            join.set("this", alias_(join.this, join_alias, table=True))
    -2859
    -2860        return _apply_list_builder(
    -2861            join,
    -2862            instance=self,
    -2863            arg="joins",
    -2864            append=append,
    -2865            copy=copy,
    -2866            **opts,
    -2867        )
    -2868
    -2869    def where(
    -2870        self,
    -2871        *expressions: t.Optional[ExpOrStr],
    -2872        append: bool = True,
    -2873        dialect: DialectType = None,
    -2874        copy: bool = True,
    -2875        **opts,
    -2876    ) -> Select:
    -2877        """
    -2878        Append to or set the WHERE expressions.
    +2847            if method:
    +2848                join.set("method", method.text)
    +2849            if side:
    +2850                join.set("side", side.text)
    +2851            if kind:
    +2852                join.set("kind", kind.text)
    +2853
    +2854        if on:
    +2855            on = and_(*ensure_list(on), dialect=dialect, copy=copy, **opts)
    +2856            join.set("on", on)
    +2857
    +2858        if using:
    +2859            join = _apply_list_builder(
    +2860                *ensure_list(using),
    +2861                instance=join,
    +2862                arg="using",
    +2863                append=append,
    +2864                copy=copy,
    +2865                **opts,
    +2866            )
    +2867
    +2868        if join_alias:
    +2869            join.set("this", alias_(join.this, join_alias, table=True))
    +2870
    +2871        return _apply_list_builder(
    +2872            join,
    +2873            instance=self,
    +2874            arg="joins",
    +2875            append=append,
    +2876            copy=copy,
    +2877            **opts,
    +2878        )
     2879
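# Illustrative sketch, not part of the patch: join_type, join_alias and using can
# be combined; the expected output is an assumption.
from sqlglot import select

q = select("*").from_("a").join("b", using=["x"], join_type="left", join_alias="b2")
print(q.sql())  # SELECT * FROM a LEFT JOIN b AS b2 USING (x)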
    -2880        Example:
    -2881            >>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()
    -2882            "SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"
    -2883
    -2884        Args:
    -2885            *expressions: the SQL code strings to parse.
    -2886                If an `Expression` instance is passed, it will be used as-is.
    -2887                Multiple expressions are combined with an AND operator.
    -2888            append: if `True`, AND the new expressions to any existing expression.
    -2889                Otherwise, this resets the expression.
    -2890            dialect: the dialect used to parse the input expressions.
    -2891            copy: if `False`, modify this expression instance in-place.
    -2892            opts: other options to use to parse the input expressions.
    -2893
    -2894        Returns:
    -2895            Select: the modified expression.
    -2896        """
    -2897        return _apply_conjunction_builder(
    -2898            *expressions,
    -2899            instance=self,
    -2900            arg="where",
    -2901            append=append,
    -2902            into=Where,
    -2903            dialect=dialect,
    -2904            copy=copy,
    -2905            **opts,
    -2906        )
    -2907
    -2908    def having(
    -2909        self,
    -2910        *expressions: t.Optional[ExpOrStr],
    -2911        append: bool = True,
    -2912        dialect: DialectType = None,
    -2913        copy: bool = True,
    -2914        **opts,
    -2915    ) -> Select:
    -2916        """
    -2917        Append to or set the HAVING expressions.
    +2880    def where(
    +2881        self,
    +2882        *expressions: t.Optional[ExpOrStr],
    +2883        append: bool = True,
    +2884        dialect: DialectType = None,
    +2885        copy: bool = True,
    +2886        **opts,
    +2887    ) -> Select:
    +2888        """
    +2889        Append to or set the WHERE expressions.
    +2890
    +2891        Example:
    +2892            >>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()
    +2893            "SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"
    +2894
    +2895        Args:
    +2896            *expressions: the SQL code strings to parse.
    +2897                If an `Expression` instance is passed, it will be used as-is.
    +2898                Multiple expressions are combined with an AND operator.
    +2899            append: if `True`, AND the new expressions to any existing expression.
    +2900                Otherwise, this resets the expression.
    +2901            dialect: the dialect used to parse the input expressions.
    +2902            copy: if `False`, modify this expression instance in-place.
    +2903            opts: other options to use to parse the input expressions.
    +2904
    +2905        Returns:
    +2906            Select: the modified expression.
    +2907        """
    +2908        return _apply_conjunction_builder(
    +2909            *expressions,
    +2910            instance=self,
    +2911            arg="where",
    +2912            append=append,
    +2913            into=Where,
    +2914            dialect=dialect,
    +2915            copy=copy,
    +2916            **opts,
    +2917        )
     2918
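# Illustrative sketch, not part of the patch: repeated where() calls are AND-ed
# together by the conjunction builder above.
from sqlglot import select

print(select("x").from_("tbl").where("x > 1").where("y < 2").sql())
# SELECT x FROM tbl WHERE x > 1 AND y < 2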
    -2919        Example:
    -2920            >>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()
    -2921            'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'
    -2922
    -2923        Args:
    -2924            *expressions: the SQL code strings to parse.
    -2925                If an `Expression` instance is passed, it will be used as-is.
    -2926                Multiple expressions are combined with an AND operator.
    -2927            append: if `True`, AND the new expressions to any existing expression.
    -2928                Otherwise, this resets the expression.
    -2929            dialect: the dialect used to parse the input expressions.
    -2930            copy: if `False`, modify this expression instance in-place.
    -2931            opts: other options to use to parse the input expressions.
    -2932
    -2933        Returns:
    -2934            The modified Select expression.
    -2935        """
    -2936        return _apply_conjunction_builder(
    -2937            *expressions,
    -2938            instance=self,
    -2939            arg="having",
    -2940            append=append,
    -2941            into=Having,
    -2942            dialect=dialect,
    -2943            copy=copy,
    -2944            **opts,
    -2945        )
    -2946
    -2947    def window(
    -2948        self,
    -2949        *expressions: t.Optional[ExpOrStr],
    -2950        append: bool = True,
    -2951        dialect: DialectType = None,
    -2952        copy: bool = True,
    -2953        **opts,
    -2954    ) -> Select:
    -2955        return _apply_list_builder(
    -2956            *expressions,
    -2957            instance=self,
    -2958            arg="windows",
    -2959            append=append,
    -2960            into=Window,
    -2961            dialect=dialect,
    -2962            copy=copy,
    -2963            **opts,
    -2964        )
    -2965
    -2966    def qualify(
    -2967        self,
    -2968        *expressions: t.Optional[ExpOrStr],
    -2969        append: bool = True,
    -2970        dialect: DialectType = None,
    -2971        copy: bool = True,
    -2972        **opts,
    -2973    ) -> Select:
    -2974        return _apply_conjunction_builder(
    -2975            *expressions,
    -2976            instance=self,
    -2977            arg="qualify",
    -2978            append=append,
    -2979            into=Qualify,
    -2980            dialect=dialect,
    -2981            copy=copy,
    -2982            **opts,
    -2983        )
    -2984
    -2985    def distinct(
    -2986        self, *ons: t.Optional[ExpOrStr], distinct: bool = True, copy: bool = True
    -2987    ) -> Select:
    -2988        """
    -2989        Set the OFFSET expression.
    -2990
    -2991        Example:
    -2992            >>> Select().from_("tbl").select("x").distinct().sql()
    -2993            'SELECT DISTINCT x FROM tbl'
    -2994
    -2995        Args:
    -2996            ons: the expressions to distinct on
    -2997            distinct: whether the Select should be distinct
    -2998            copy: if `False`, modify this expression instance in-place.
    -2999
    -3000        Returns:
    -3001            Select: the modified expression.
    -3002        """
    -3003        instance = _maybe_copy(self, copy)
    -3004        on = Tuple(expressions=[maybe_parse(on, copy=copy) for on in ons if on]) if ons else None
    -3005        instance.set("distinct", Distinct(on=on) if distinct else None)
    -3006        return instance
    -3007
    -3008    def ctas(
    -3009        self,
    -3010        table: ExpOrStr,
    -3011        properties: t.Optional[t.Dict] = None,
    -3012        dialect: DialectType = None,
    -3013        copy: bool = True,
    -3014        **opts,
    -3015    ) -> Create:
    -3016        """
    -3017        Convert this expression to a CREATE TABLE AS statement.
    +2919    def having(
    +2920        self,
    +2921        *expressions: t.Optional[ExpOrStr],
    +2922        append: bool = True,
    +2923        dialect: DialectType = None,
    +2924        copy: bool = True,
    +2925        **opts,
    +2926    ) -> Select:
    +2927        """
    +2928        Append to or set the HAVING expressions.
    +2929
    +2930        Example:
    +2931            >>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()
    +2932            'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'
    +2933
    +2934        Args:
    +2935            *expressions: the SQL code strings to parse.
    +2936                If an `Expression` instance is passed, it will be used as-is.
    +2937                Multiple expressions are combined with an AND operator.
    +2938            append: if `True`, AND the new expressions to any existing expression.
    +2939                Otherwise, this resets the expression.
    +2940            dialect: the dialect used to parse the input expressions.
    +2941            copy: if `False`, modify this expression instance in-place.
    +2942            opts: other options to use to parse the input expressions.
    +2943
    +2944        Returns:
    +2945            The modified Select expression.
    +2946        """
    +2947        return _apply_conjunction_builder(
    +2948            *expressions,
    +2949            instance=self,
    +2950            arg="having",
    +2951            append=append,
    +2952            into=Having,
    +2953            dialect=dialect,
    +2954            copy=copy,
    +2955            **opts,
    +2956        )
    +2957
    +2958    def window(
    +2959        self,
    +2960        *expressions: t.Optional[ExpOrStr],
    +2961        append: bool = True,
    +2962        dialect: DialectType = None,
    +2963        copy: bool = True,
    +2964        **opts,
    +2965    ) -> Select:
    +2966        return _apply_list_builder(
    +2967            *expressions,
    +2968            instance=self,
    +2969            arg="windows",
    +2970            append=append,
    +2971            into=Window,
    +2972            dialect=dialect,
    +2973            copy=copy,
    +2974            **opts,
    +2975        )
    +2976
    +2977    def qualify(
    +2978        self,
    +2979        *expressions: t.Optional[ExpOrStr],
    +2980        append: bool = True,
    +2981        dialect: DialectType = None,
    +2982        copy: bool = True,
    +2983        **opts,
    +2984    ) -> Select:
    +2985        return _apply_conjunction_builder(
    +2986            *expressions,
    +2987            instance=self,
    +2988            arg="qualify",
    +2989            append=append,
    +2990            into=Qualify,
    +2991            dialect=dialect,
    +2992            copy=copy,
    +2993            **opts,
    +2994        )
    +2995
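# Illustrative sketch, not part of the patch: window() and qualify() have no
# docstrings above, so a typical QUALIFY usage is shown; the expected output is
# an assumption.
from sqlglot import select

q = (
    select("x", "ROW_NUMBER() OVER (PARTITION BY x ORDER BY y) AS rn")
    .from_("tbl")
    .qualify("rn = 1")
)
print(q.sql())  # SELECT x, ROW_NUMBER() OVER (PARTITION BY x ORDER BY y) AS rn FROM tbl QUALIFY rn = 1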
    +2996    def distinct(
    +2997        self, *ons: t.Optional[ExpOrStr], distinct: bool = True, copy: bool = True
    +2998    ) -> Select:
    +2999        """
+3000        Set the DISTINCT expression.
    +3001
    +3002        Example:
    +3003            >>> Select().from_("tbl").select("x").distinct().sql()
    +3004            'SELECT DISTINCT x FROM tbl'
    +3005
    +3006        Args:
    +3007            ons: the expressions to distinct on
    +3008            distinct: whether the Select should be distinct
    +3009            copy: if `False`, modify this expression instance in-place.
    +3010
    +3011        Returns:
    +3012            Select: the modified expression.
    +3013        """
    +3014        instance = _maybe_copy(self, copy)
    +3015        on = Tuple(expressions=[maybe_parse(on, copy=copy) for on in ons if on]) if ons else None
    +3016        instance.set("distinct", Distinct(on=on) if distinct else None)
    +3017        return instance
     3018
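# Illustrative sketch, not part of the patch: passing expressions to distinct()
# builds a DISTINCT ON clause; the expected output is an assumption.
from sqlglot import select

print(select("x", "y").from_("tbl").distinct().sql())     # SELECT DISTINCT x, y FROM tbl
print(select("x", "y").from_("tbl").distinct("x").sql())  # SELECT DISTINCT ON (x) x, y FROM tbl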
    -3019        Example:
    -3020            >>> Select().select("*").from_("tbl").ctas("x").sql()
    -3021            'CREATE TABLE x AS SELECT * FROM tbl'
    -3022
    -3023        Args:
    -3024            table: the SQL code string to parse as the table name.
    -3025                If another `Expression` instance is passed, it will be used as-is.
    -3026            properties: an optional mapping of table properties
    -3027            dialect: the dialect used to parse the input table.
    -3028            copy: if `False`, modify this expression instance in-place.
    -3029            opts: other options to use to parse the input table.
    -3030
    -3031        Returns:
    -3032            The new Create expression.
    -3033        """
    -3034        instance = _maybe_copy(self, copy)
    -3035        table_expression = maybe_parse(
    -3036            table,
    -3037            into=Table,
    -3038            dialect=dialect,
    -3039            **opts,
    -3040        )
    -3041        properties_expression = None
    -3042        if properties:
    -3043            properties_expression = Properties.from_dict(properties)
    -3044
    -3045        return Create(
    -3046            this=table_expression,
    -3047            kind="table",
    -3048            expression=instance,
    -3049            properties=properties_expression,
    -3050        )
    -3051
    -3052    def lock(self, update: bool = True, copy: bool = True) -> Select:
    -3053        """
    -3054        Set the locking read mode for this expression.
    +3019    def ctas(
    +3020        self,
    +3021        table: ExpOrStr,
    +3022        properties: t.Optional[t.Dict] = None,
    +3023        dialect: DialectType = None,
    +3024        copy: bool = True,
    +3025        **opts,
    +3026    ) -> Create:
    +3027        """
    +3028        Convert this expression to a CREATE TABLE AS statement.
    +3029
    +3030        Example:
    +3031            >>> Select().select("*").from_("tbl").ctas("x").sql()
    +3032            'CREATE TABLE x AS SELECT * FROM tbl'
    +3033
    +3034        Args:
    +3035            table: the SQL code string to parse as the table name.
    +3036                If another `Expression` instance is passed, it will be used as-is.
    +3037            properties: an optional mapping of table properties
    +3038            dialect: the dialect used to parse the input table.
    +3039            copy: if `False`, modify this expression instance in-place.
    +3040            opts: other options to use to parse the input table.
    +3041
    +3042        Returns:
    +3043            The new Create expression.
    +3044        """
    +3045        instance = _maybe_copy(self, copy)
    +3046        table_expression = maybe_parse(
    +3047            table,
    +3048            into=Table,
    +3049            dialect=dialect,
    +3050            **opts,
    +3051        )
    +3052        properties_expression = None
    +3053        if properties:
    +3054            properties_expression = Properties.from_dict(properties)
     3055
    -3056        Examples:
    -3057            >>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")
    -3058            "SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"
    -3059
    -3060            >>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")
    -3061            "SELECT x FROM tbl WHERE x = 'a' FOR SHARE"
    +3056        return Create(
    +3057            this=table_expression,
    +3058            kind="table",
    +3059            expression=instance,
    +3060            properties=properties_expression,
    +3061        )
     3062
    -3063        Args:
    -3064            update: if `True`, the locking type will be `FOR UPDATE`, else it will be `FOR SHARE`.
    -3065            copy: if `False`, modify this expression instance in-place.
    +3063    def lock(self, update: bool = True, copy: bool = True) -> Select:
    +3064        """
    +3065        Set the locking read mode for this expression.
     3066
    -3067        Returns:
    -3068            The modified expression.
    -3069        """
    +3067        Examples:
    +3068            >>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")
    +3069            "SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"
     3070
    -3071        inst = _maybe_copy(self, copy)
    -3072        inst.set("locks", [Lock(update=update)])
    +3071            >>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")
    +3072            "SELECT x FROM tbl WHERE x = 'a' FOR SHARE"
     3073
    -3074        return inst
    -3075
    -3076    @property
    -3077    def named_selects(self) -> t.List[str]:
    -3078        return [e.output_name for e in self.expressions if e.alias_or_name]
    -3079
    -3080    @property
    -3081    def is_star(self) -> bool:
    -3082        return any(expression.is_star for expression in self.expressions)
    +3074        Args:
    +3075            update: if `True`, the locking type will be `FOR UPDATE`, else it will be `FOR SHARE`.
    +3076            copy: if `False`, modify this expression instance in-place.
    +3077
    +3078        Returns:
    +3079            The modified expression.
    +3080        """
    +3081        inst = _maybe_copy(self, copy)
    +3082        inst.set("locks", [Lock(update=update)])
     3083
    -3084    @property
    -3085    def selects(self) -> t.List[Expression]:
    -3086        return self.expressions
    +3084        return inst
    +3085
    +3086    def hint(self, *hints: ExpOrStr, dialect: DialectType = None, copy: bool = True) -> Select:
    +3087        """
    +3088        Set hints for this expression.
    +3089
    +3090        Examples:
    +3091            >>> Select().select("x").from_("tbl").hint("BROADCAST(y)").sql(dialect="spark")
    +3092            'SELECT /*+ BROADCAST(y) */ x FROM tbl'
    +3093
    +3094        Args:
    +3095            hints: The SQL code strings to parse as the hints.
    +3096                If an `Expression` instance is passed, it will be used as-is.
    +3097            dialect: The dialect used to parse the hints.
    +3098            copy: If `False`, modify this expression instance in-place.
    +3099
    +3100        Returns:
    +3101            The modified expression.
    +3102        """
    +3103        inst = _maybe_copy(self, copy)
    +3104        inst.set(
    +3105            "hint", Hint(expressions=[maybe_parse(h, copy=copy, dialect=dialect) for h in hints])
    +3106        )
    +3107
    +3108        return inst
    +3109
    +3110    @property
    +3111    def named_selects(self) -> t.List[str]:
    +3112        return [e.output_name for e in self.expressions if e.alias_or_name]
    +3113
    +3114    @property
    +3115    def is_star(self) -> bool:
    +3116        return any(expression.is_star for expression in self.expressions)
    +3117
    +3118    @property
    +3119    def selects(self) -> t.List[Expression]:
    +3120        return self.expressions
     
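    The hunk above documents the newly added Select.hint builder. A minimal usage sketch, not taken from the source doctests: the table, the columns, and the second hint are made up, and the multi-hint output in the comment is my expectation for this version rather than a verified result.

        from sqlglot.expressions import Select

        q = Select().select("x").from_("tbl").hint("BROADCAST(y)", "REPARTITION(100)")
        print(q.sql(dialect="spark"))
        # expected along the lines of: SELECT /*+ BROADCAST(y), REPARTITION(100) */ x FROM tbl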
    @@ -24864,37 +25123,37 @@ Otherwise, this resets the expressions.
    -
    2434    def from_(
    -2435        self, expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts
    -2436    ) -> Select:
    -2437        """
    -2438        Set the FROM expression.
    -2439
    -2440        Example:
    -2441            >>> Select().from_("tbl").select("x").sql()
    -2442            'SELECT x FROM tbl'
    -2443
    -2444        Args:
    -2445            expression : the SQL code strings to parse.
    -2446                If a `From` instance is passed, this is used as-is.
    -2447                If another `Expression` instance is passed, it will be wrapped in a `From`.
    -2448            dialect: the dialect used to parse the input expression.
    -2449            copy: if `False`, modify this expression instance in-place.
    -2450            opts: other options to use to parse the input expressions.
    -2451
    -2452        Returns:
    -2453            The modified Select expression.
    -2454        """
    -2455        return _apply_builder(
    -2456            expression=expression,
    -2457            instance=self,
    -2458            arg="from",
    -2459            into=From,
    -2460            prefix="FROM",
    -2461            dialect=dialect,
    -2462            copy=copy,
    -2463            **opts,
    -2464        )
    +            
    2445    def from_(
    +2446        self, expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts
    +2447    ) -> Select:
    +2448        """
    +2449        Set the FROM expression.
    +2450
    +2451        Example:
    +2452            >>> Select().from_("tbl").select("x").sql()
    +2453            'SELECT x FROM tbl'
    +2454
    +2455        Args:
    +2456            expression: the SQL code string to parse.
    +2457                If a `From` instance is passed, this is used as-is.
    +2458                If another `Expression` instance is passed, it will be wrapped in a `From`.
    +2459            dialect: the dialect used to parse the input expression.
    +2460            copy: if `False`, modify this expression instance in-place.
    +2461            opts: other options to use to parse the input expressions.
    +2462
    +2463        Returns:
    +2464            The modified Select expression.
    +2465        """
    +2466        return _apply_builder(
    +2467            expression=expression,
    +2468            instance=self,
    +2469            arg="from",
    +2470            into=From,
    +2471            prefix="FROM",
    +2472            dialect=dialect,
    +2473            copy=copy,
    +2474            **opts,
    +2475        )
     
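    Every builder documented in these hunks takes the same copy flag. A small sketch of the difference between the default copy-on-write behaviour and copy=False; the names base, q, and tbl are hypothetical.

        from sqlglot.expressions import Select

        base = Select().from_("tbl")
        q = base.select("x")            # copy=True (default): returns a new Select, `base` is untouched
        base.select("y", copy=False)    # copy=False: mutates `base` in place

        print(q.sql())     # SELECT x FROM tbl
        print(base.sql())  # SELECT y FROM tbl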
    @@ -24941,49 +25200,49 @@ If another Expression instance is passed,
    -
    2466    def group_by(
    -2467        self,
    -2468        *expressions: t.Optional[ExpOrStr],
    -2469        append: bool = True,
    -2470        dialect: DialectType = None,
    -2471        copy: bool = True,
    -2472        **opts,
    -2473    ) -> Select:
    -2474        """
    -2475        Set the GROUP BY expression.
    -2476
    -2477        Example:
    -2478            >>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()
    -2479            'SELECT x, COUNT(1) FROM tbl GROUP BY x'
    -2480
    -2481        Args:
    -2482            *expressions: the SQL code strings to parse.
    -2483                If a `Group` instance is passed, this is used as-is.
    -2484                If another `Expression` instance is passed, it will be wrapped in a `Group`.
    -2485                If nothing is passed in then a group by is not applied to the expression
    -2486            append: if `True`, add to any existing expressions.
    -2487                Otherwise, this flattens all the `Group` expression into a single expression.
    -2488            dialect: the dialect used to parse the input expression.
    -2489            copy: if `False`, modify this expression instance in-place.
    -2490            opts: other options to use to parse the input expressions.
    +            
    2477    def group_by(
    +2478        self,
    +2479        *expressions: t.Optional[ExpOrStr],
    +2480        append: bool = True,
    +2481        dialect: DialectType = None,
    +2482        copy: bool = True,
    +2483        **opts,
    +2484    ) -> Select:
    +2485        """
    +2486        Set the GROUP BY expression.
    +2487
    +2488        Example:
    +2489            >>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()
    +2490            'SELECT x, COUNT(1) FROM tbl GROUP BY x'
     2491
    -2492        Returns:
    -2493            The modified Select expression.
    -2494        """
    -2495        if not expressions:
    -2496            return self if not copy else self.copy()
    -2497
    -2498        return _apply_child_list_builder(
    -2499            *expressions,
    -2500            instance=self,
    -2501            arg="group",
    -2502            append=append,
    -2503            copy=copy,
    -2504            prefix="GROUP BY",
    -2505            into=Group,
    -2506            dialect=dialect,
    -2507            **opts,
    -2508        )
    +2492        Args:
    +2493            *expressions: the SQL code strings to parse.
    +2494                If a `Group` instance is passed, this is used as-is.
    +2495                If another `Expression` instance is passed, it will be wrapped in a `Group`.
    +2496                If nothing is passed in, then a GROUP BY is not applied to the expression.
    +2497            append: if `True`, add to any existing expressions.
    +2498                Otherwise, this flattens all the `Group` expressions into a single expression.
    +2499            dialect: the dialect used to parse the input expression.
    +2500            copy: if `False`, modify this expression instance in-place.
    +2501            opts: other options to use to parse the input expressions.
    +2502
    +2503        Returns:
    +2504            The modified Select expression.
    +2505        """
    +2506        if not expressions:
    +2507            return self if not copy else self.copy()
    +2508
    +2509        return _apply_child_list_builder(
    +2510            *expressions,
    +2511            instance=self,
    +2512            arg="group",
    +2513            append=append,
    +2514            copy=copy,
    +2515            prefix="GROUP BY",
    +2516            into=Group,
    +2517            dialect=dialect,
    +2518            **opts,
    +2519        )
     
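    A hedged sketch of grouping by more than one key; the sales table and its columns are invented, and the commented output is what I expect the default dialect to produce.

        from sqlglot.expressions import Select

        q = (
            Select()
            .select("region", "product", "SUM(amount)")
            .from_("sales")
            .group_by("region", "product")
        )
        print(q.sql())
        # expected: SELECT region, product, SUM(amount) FROM sales GROUP BY region, product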
    @@ -25033,45 +25292,45 @@ Otherwise, this flattens all the Group express
    -
    2510    def order_by(
    -2511        self,
    -2512        *expressions: t.Optional[ExpOrStr],
    -2513        append: bool = True,
    -2514        dialect: DialectType = None,
    -2515        copy: bool = True,
    -2516        **opts,
    -2517    ) -> Select:
    -2518        """
    -2519        Set the ORDER BY expression.
    -2520
    -2521        Example:
    -2522            >>> Select().from_("tbl").select("x").order_by("x DESC").sql()
    -2523            'SELECT x FROM tbl ORDER BY x DESC'
    -2524
    -2525        Args:
    -2526            *expressions: the SQL code strings to parse.
    -2527                If a `Group` instance is passed, this is used as-is.
    -2528                If another `Expression` instance is passed, it will be wrapped in a `Order`.
    -2529            append: if `True`, add to any existing expressions.
    -2530                Otherwise, this flattens all the `Order` expression into a single expression.
    -2531            dialect: the dialect used to parse the input expression.
    -2532            copy: if `False`, modify this expression instance in-place.
    -2533            opts: other options to use to parse the input expressions.
    -2534
    -2535        Returns:
    -2536            The modified Select expression.
    -2537        """
    -2538        return _apply_child_list_builder(
    -2539            *expressions,
    -2540            instance=self,
    -2541            arg="order",
    -2542            append=append,
    -2543            copy=copy,
    -2544            prefix="ORDER BY",
    -2545            into=Order,
    -2546            dialect=dialect,
    -2547            **opts,
    -2548        )
    +            
    2521    def order_by(
    +2522        self,
    +2523        *expressions: t.Optional[ExpOrStr],
    +2524        append: bool = True,
    +2525        dialect: DialectType = None,
    +2526        copy: bool = True,
    +2527        **opts,
    +2528    ) -> Select:
    +2529        """
    +2530        Set the ORDER BY expression.
    +2531
    +2532        Example:
    +2533            >>> Select().from_("tbl").select("x").order_by("x DESC").sql()
    +2534            'SELECT x FROM tbl ORDER BY x DESC'
    +2535
    +2536        Args:
    +2537            *expressions: the SQL code strings to parse.
    +2538                If an `Order` instance is passed, this is used as-is.
    +2539                If another `Expression` instance is passed, it will be wrapped in an `Order`.
    +2540            append: if `True`, add to any existing expressions.
    +2541                Otherwise, this flattens all the `Order` expressions into a single expression.
    +2542            dialect: the dialect used to parse the input expression.
    +2543            copy: if `False`, modify this expression instance in-place.
    +2544            opts: other options to use to parse the input expressions.
    +2545
    +2546        Returns:
    +2547            The modified Select expression.
    +2548        """
    +2549        return _apply_child_list_builder(
    +2550            *expressions,
    +2551            instance=self,
    +2552            arg="order",
    +2553            append=append,
    +2554            copy=copy,
    +2555            prefix="ORDER BY",
    +2556            into=Order,
    +2557            dialect=dialect,
    +2558            **opts,
    +2559        )
     
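    A short sketch of the append flag on order_by; the column names are hypothetical and the commented outputs are expectations rather than doctests.

        from sqlglot.expressions import Select

        q = Select().select("x", "y").from_("tbl").order_by("x DESC")
        q = q.order_by("y")                     # append=True (default) extends the ORDER BY list
        print(q.sql())                          # roughly: SELECT x, y FROM tbl ORDER BY x DESC, y

        q = q.order_by("y DESC", append=False)  # append=False replaces the existing ORDER BY
        print(q.sql())                          # roughly: SELECT x, y FROM tbl ORDER BY y DESC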
    @@ -25120,45 +25379,45 @@ Otherwise, this flattens all the Order express
    -
    2550    def sort_by(
    -2551        self,
    -2552        *expressions: t.Optional[ExpOrStr],
    -2553        append: bool = True,
    -2554        dialect: DialectType = None,
    -2555        copy: bool = True,
    -2556        **opts,
    -2557    ) -> Select:
    -2558        """
    -2559        Set the SORT BY expression.
    -2560
    -2561        Example:
    -2562            >>> Select().from_("tbl").select("x").sort_by("x DESC").sql(dialect="hive")
    -2563            'SELECT x FROM tbl SORT BY x DESC'
    -2564
    -2565        Args:
    -2566            *expressions: the SQL code strings to parse.
    -2567                If a `Group` instance is passed, this is used as-is.
    -2568                If another `Expression` instance is passed, it will be wrapped in a `SORT`.
    -2569            append: if `True`, add to any existing expressions.
    -2570                Otherwise, this flattens all the `Order` expression into a single expression.
    -2571            dialect: the dialect used to parse the input expression.
    -2572            copy: if `False`, modify this expression instance in-place.
    -2573            opts: other options to use to parse the input expressions.
    -2574
    -2575        Returns:
    -2576            The modified Select expression.
    -2577        """
    -2578        return _apply_child_list_builder(
    -2579            *expressions,
    -2580            instance=self,
    -2581            arg="sort",
    -2582            append=append,
    -2583            copy=copy,
    -2584            prefix="SORT BY",
    -2585            into=Sort,
    -2586            dialect=dialect,
    -2587            **opts,
    -2588        )
    +            
    2561    def sort_by(
    +2562        self,
    +2563        *expressions: t.Optional[ExpOrStr],
    +2564        append: bool = True,
    +2565        dialect: DialectType = None,
    +2566        copy: bool = True,
    +2567        **opts,
    +2568    ) -> Select:
    +2569        """
    +2570        Set the SORT BY expression.
    +2571
    +2572        Example:
    +2573            >>> Select().from_("tbl").select("x").sort_by("x DESC").sql(dialect="hive")
    +2574            'SELECT x FROM tbl SORT BY x DESC'
    +2575
    +2576        Args:
    +2577            *expressions: the SQL code strings to parse.
    +2578                If a `Sort` instance is passed, this is used as-is.
    +2579                If another `Expression` instance is passed, it will be wrapped in a `Sort`.
    +2580            append: if `True`, add to any existing expressions.
    +2581                Otherwise, this flattens all the `Sort` expressions into a single expression.
    +2582            dialect: the dialect used to parse the input expression.
    +2583            copy: if `False`, modify this expression instance in-place.
    +2584            opts: other options to use to parse the input expressions.
    +2585
    +2586        Returns:
    +2587            The modified Select expression.
    +2588        """
    +2589        return _apply_child_list_builder(
    +2590            *expressions,
    +2591            instance=self,
    +2592            arg="sort",
    +2593            append=append,
    +2594            copy=copy,
    +2595            prefix="SORT BY",
    +2596            into=Sort,
    +2597            dialect=dialect,
    +2598            **opts,
    +2599        )
     
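    A sketch of sort_by with multiple keys for a Hive-style dialect; the logs table is invented and the commented output is an expectation.

        from sqlglot.expressions import Select

        q = Select().select("event", "ts").from_("logs").sort_by("ts DESC", "event")
        print(q.sql(dialect="hive"))
        # roughly: SELECT event, ts FROM logs SORT BY ts DESC, event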
    @@ -25207,45 +25466,45 @@ Otherwise, this flattens all the Order express
    -
    2590    def cluster_by(
    -2591        self,
    -2592        *expressions: t.Optional[ExpOrStr],
    -2593        append: bool = True,
    -2594        dialect: DialectType = None,
    -2595        copy: bool = True,
    -2596        **opts,
    -2597    ) -> Select:
    -2598        """
    -2599        Set the CLUSTER BY expression.
    -2600
    -2601        Example:
    -2602            >>> Select().from_("tbl").select("x").cluster_by("x DESC").sql(dialect="hive")
    -2603            'SELECT x FROM tbl CLUSTER BY x DESC'
    -2604
    -2605        Args:
    -2606            *expressions: the SQL code strings to parse.
    -2607                If a `Group` instance is passed, this is used as-is.
    -2608                If another `Expression` instance is passed, it will be wrapped in a `Cluster`.
    -2609            append: if `True`, add to any existing expressions.
    -2610                Otherwise, this flattens all the `Order` expression into a single expression.
    -2611            dialect: the dialect used to parse the input expression.
    -2612            copy: if `False`, modify this expression instance in-place.
    -2613            opts: other options to use to parse the input expressions.
    -2614
    -2615        Returns:
    -2616            The modified Select expression.
    -2617        """
    -2618        return _apply_child_list_builder(
    -2619            *expressions,
    -2620            instance=self,
    -2621            arg="cluster",
    -2622            append=append,
    -2623            copy=copy,
    -2624            prefix="CLUSTER BY",
    -2625            into=Cluster,
    -2626            dialect=dialect,
    -2627            **opts,
    -2628        )
    +            
    2601    def cluster_by(
    +2602        self,
    +2603        *expressions: t.Optional[ExpOrStr],
    +2604        append: bool = True,
    +2605        dialect: DialectType = None,
    +2606        copy: bool = True,
    +2607        **opts,
    +2608    ) -> Select:
    +2609        """
    +2610        Set the CLUSTER BY expression.
    +2611
    +2612        Example:
    +2613            >>> Select().from_("tbl").select("x").cluster_by("x DESC").sql(dialect="hive")
    +2614            'SELECT x FROM tbl CLUSTER BY x DESC'
    +2615
    +2616        Args:
    +2617            *expressions: the SQL code strings to parse.
    +2618                If a `Cluster` instance is passed, this is used as-is.
    +2619                If another `Expression` instance is passed, it will be wrapped in a `Cluster`.
    +2620            append: if `True`, add to any existing expressions.
    +2621                Otherwise, this flattens all the `Cluster` expressions into a single expression.
    +2622            dialect: the dialect used to parse the input expression.
    +2623            copy: if `False`, modify this expression instance in-place.
    +2624            opts: other options to use to parse the input expressions.
    +2625
    +2626        Returns:
    +2627            The modified Select expression.
    +2628        """
    +2629        return _apply_child_list_builder(
    +2630            *expressions,
    +2631            instance=self,
    +2632            arg="cluster",
    +2633            append=append,
    +2634            copy=copy,
    +2635            prefix="CLUSTER BY",
    +2636            into=Cluster,
    +2637            dialect=dialect,
    +2638            **opts,
    +2639        )
     
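    cluster_by follows the same pattern as sort_by; a one-line sketch with invented names, output again an expectation.

        from sqlglot.expressions import Select

        print(Select().select("event").from_("logs").cluster_by("event", "ts").sql(dialect="hive"))
        # roughly: SELECT event FROM logs CLUSTER BY event, ts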
    @@ -25294,38 +25553,38 @@ Otherwise, this flattens all the Order express
    -
    2630    def limit(
    -2631        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    -2632    ) -> Select:
    -2633        """
    -2634        Set the LIMIT expression.
    -2635
    -2636        Example:
    -2637            >>> Select().from_("tbl").select("x").limit(10).sql()
    -2638            'SELECT x FROM tbl LIMIT 10'
    -2639
    -2640        Args:
    -2641            expression: the SQL code string to parse.
    -2642                This can also be an integer.
    -2643                If a `Limit` instance is passed, this is used as-is.
    -2644                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
    -2645            dialect: the dialect used to parse the input expression.
    -2646            copy: if `False`, modify this expression instance in-place.
    -2647            opts: other options to use to parse the input expressions.
    -2648
    -2649        Returns:
    -2650            Select: the modified expression.
    -2651        """
    -2652        return _apply_builder(
    -2653            expression=expression,
    -2654            instance=self,
    -2655            arg="limit",
    -2656            into=Limit,
    -2657            prefix="LIMIT",
    -2658            dialect=dialect,
    -2659            copy=copy,
    -2660            **opts,
    -2661        )
    +            
    2641    def limit(
    +2642        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    +2643    ) -> Select:
    +2644        """
    +2645        Set the LIMIT expression.
    +2646
    +2647        Example:
    +2648            >>> Select().from_("tbl").select("x").limit(10).sql()
    +2649            'SELECT x FROM tbl LIMIT 10'
    +2650
    +2651        Args:
    +2652            expression: the SQL code string to parse.
    +2653                This can also be an integer.
    +2654                If a `Limit` instance is passed, this is used as-is.
    +2655                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
    +2656            dialect: the dialect used to parse the input expression.
    +2657            copy: if `False`, modify this expression instance in-place.
    +2658            opts: other options to use to parse the input expressions.
    +2659
    +2660        Returns:
    +2661            Select: the modified expression.
    +2662        """
    +2663        return _apply_builder(
    +2664            expression=expression,
    +2665            instance=self,
    +2666            arg="limit",
    +2667            into=Limit,
    +2668            prefix="LIMIT",
    +2669            dialect=dialect,
    +2670            copy=copy,
    +2671            **opts,
    +2672        )
     
    @@ -25373,38 +25632,38 @@ If another Expression instance is passed,
    -
    2663    def offset(
    -2664        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    -2665    ) -> Select:
    -2666        """
    -2667        Set the OFFSET expression.
    -2668
    -2669        Example:
    -2670            >>> Select().from_("tbl").select("x").offset(10).sql()
    -2671            'SELECT x FROM tbl OFFSET 10'
    -2672
    -2673        Args:
    -2674            expression: the SQL code string to parse.
    -2675                This can also be an integer.
    -2676                If a `Offset` instance is passed, this is used as-is.
    -2677                If another `Expression` instance is passed, it will be wrapped in a `Offset`.
    -2678            dialect: the dialect used to parse the input expression.
    -2679            copy: if `False`, modify this expression instance in-place.
    -2680            opts: other options to use to parse the input expressions.
    -2681
    -2682        Returns:
    -2683            The modified Select expression.
    -2684        """
    -2685        return _apply_builder(
    -2686            expression=expression,
    -2687            instance=self,
    -2688            arg="offset",
    -2689            into=Offset,
    -2690            prefix="OFFSET",
    -2691            dialect=dialect,
    -2692            copy=copy,
    -2693            **opts,
    -2694        )
    +            
    2674    def offset(
    +2675        self, expression: ExpOrStr | int, dialect: DialectType = None, copy: bool = True, **opts
    +2676    ) -> Select:
    +2677        """
    +2678        Set the OFFSET expression.
    +2679
    +2680        Example:
    +2681            >>> Select().from_("tbl").select("x").offset(10).sql()
    +2682            'SELECT x FROM tbl OFFSET 10'
    +2683
    +2684        Args:
    +2685            expression: the SQL code string to parse.
    +2686                This can also be an integer.
    +2687                If an `Offset` instance is passed, this is used as-is.
    +2688                If another `Expression` instance is passed, it will be wrapped in an `Offset`.
    +2689            dialect: the dialect used to parse the input expression.
    +2690            copy: if `False`, modify this expression instance in-place.
    +2691            opts: other options to use to parse the input expressions.
    +2692
    +2693        Returns:
    +2694            The modified Select expression.
    +2695        """
    +2696        return _apply_builder(
    +2697            expression=expression,
    +2698            instance=self,
    +2699            arg="offset",
    +2700            into=Offset,
    +2701            prefix="OFFSET",
    +2702            dialect=dialect,
    +2703            copy=copy,
    +2704            **opts,
    +2705        )
     
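    limit and offset accept plain integers as well as SQL strings or Limit/Offset nodes. A combined sketch; the exact clause order in the output is my expectation for the default dialect.

        from sqlglot.expressions import Select

        q = Select().select("x").from_("tbl").order_by("x").limit(10).offset(20)
        print(q.sql())
        # roughly: SELECT x FROM tbl ORDER BY x LIMIT 10 OFFSET 20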
    @@ -25452,42 +25711,42 @@ If another Expression instance is passed,
    -
    2696    def select(
    -2697        self,
    -2698        *expressions: t.Optional[ExpOrStr],
    -2699        append: bool = True,
    -2700        dialect: DialectType = None,
    -2701        copy: bool = True,
    -2702        **opts,
    -2703    ) -> Select:
    -2704        """
    -2705        Append to or set the SELECT expressions.
    -2706
    -2707        Example:
    -2708            >>> Select().select("x", "y").sql()
    -2709            'SELECT x, y'
    -2710
    -2711        Args:
    -2712            *expressions: the SQL code strings to parse.
    -2713                If an `Expression` instance is passed, it will be used as-is.
    -2714            append: if `True`, add to any existing expressions.
    -2715                Otherwise, this resets the expressions.
    -2716            dialect: the dialect used to parse the input expressions.
    -2717            copy: if `False`, modify this expression instance in-place.
    -2718            opts: other options to use to parse the input expressions.
    -2719
    -2720        Returns:
    -2721            The modified Select expression.
    -2722        """
    -2723        return _apply_list_builder(
    -2724            *expressions,
    -2725            instance=self,
    -2726            arg="expressions",
    -2727            append=append,
    -2728            dialect=dialect,
    -2729            copy=copy,
    -2730            **opts,
    -2731        )
    +            
    2707    def select(
    +2708        self,
    +2709        *expressions: t.Optional[ExpOrStr],
    +2710        append: bool = True,
    +2711        dialect: DialectType = None,
    +2712        copy: bool = True,
    +2713        **opts,
    +2714    ) -> Select:
    +2715        """
    +2716        Append to or set the SELECT expressions.
    +2717
    +2718        Example:
    +2719            >>> Select().select("x", "y").sql()
    +2720            'SELECT x, y'
    +2721
    +2722        Args:
    +2723            *expressions: the SQL code strings to parse.
    +2724                If an `Expression` instance is passed, it will be used as-is.
    +2725            append: if `True`, add to any existing expressions.
    +2726                Otherwise, this resets the expressions.
    +2727            dialect: the dialect used to parse the input expressions.
    +2728            copy: if `False`, modify this expression instance in-place.
    +2729            opts: other options to use to parse the input expressions.
    +2730
    +2731        Returns:
    +2732            The modified Select expression.
    +2733        """
    +2734        return _apply_list_builder(
    +2735            *expressions,
    +2736            instance=self,
    +2737            arg="expressions",
    +2738            append=append,
    +2739            dialect=dialect,
    +2740            copy=copy,
    +2741            **opts,
    +2742        )
     
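    A sketch of select's append flag, which either extends or resets the projection list; the names are hypothetical.

        from sqlglot.expressions import Select

        q = Select().select("x").from_("tbl")
        q = q.select("y")                  # append=True (default): adds a projection
        print(q.sql())                     # SELECT x, y FROM tbl

        q = q.select("z", append=False)    # append=False: replaces the projections
        print(q.sql())                     # SELECT z FROM tbl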
    @@ -25535,44 +25794,44 @@ Otherwise, this resets the expressions.
    -
    2733    def lateral(
    -2734        self,
    -2735        *expressions: t.Optional[ExpOrStr],
    -2736        append: bool = True,
    -2737        dialect: DialectType = None,
    -2738        copy: bool = True,
    -2739        **opts,
    -2740    ) -> Select:
    -2741        """
    -2742        Append to or set the LATERAL expressions.
    -2743
    -2744        Example:
    -2745            >>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()
    -2746            'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'
    -2747
    -2748        Args:
    -2749            *expressions: the SQL code strings to parse.
    -2750                If an `Expression` instance is passed, it will be used as-is.
    -2751            append: if `True`, add to any existing expressions.
    -2752                Otherwise, this resets the expressions.
    -2753            dialect: the dialect used to parse the input expressions.
    -2754            copy: if `False`, modify this expression instance in-place.
    -2755            opts: other options to use to parse the input expressions.
    -2756
    -2757        Returns:
    -2758            The modified Select expression.
    -2759        """
    -2760        return _apply_list_builder(
    -2761            *expressions,
    -2762            instance=self,
    -2763            arg="laterals",
    -2764            append=append,
    -2765            into=Lateral,
    -2766            prefix="LATERAL VIEW",
    -2767            dialect=dialect,
    -2768            copy=copy,
    -2769            **opts,
    -2770        )
    +            
    2744    def lateral(
    +2745        self,
    +2746        *expressions: t.Optional[ExpOrStr],
    +2747        append: bool = True,
    +2748        dialect: DialectType = None,
    +2749        copy: bool = True,
    +2750        **opts,
    +2751    ) -> Select:
    +2752        """
    +2753        Append to or set the LATERAL expressions.
    +2754
    +2755        Example:
    +2756            >>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()
    +2757            'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'
    +2758
    +2759        Args:
    +2760            *expressions: the SQL code strings to parse.
    +2761                If an `Expression` instance is passed, it will be used as-is.
    +2762            append: if `True`, add to any existing expressions.
    +2763                Otherwise, this resets the expressions.
    +2764            dialect: the dialect used to parse the input expressions.
    +2765            copy: if `False`, modify this expression instance in-place.
    +2766            opts: other options to use to parse the input expressions.
    +2767
    +2768        Returns:
    +2769            The modified Select expression.
    +2770        """
    +2771        return _apply_list_builder(
    +2772            *expressions,
    +2773            instance=self,
    +2774            arg="laterals",
    +2775            append=append,
    +2776            into=Lateral,
    +2777            prefix="LATERAL VIEW",
    +2778            dialect=dialect,
    +2779            copy=copy,
    +2780            **opts,
    +2781        )
     
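    A sketch extending the lateral doctest above so the exploded alias is also projected; only the added z projection differs from the documented example, and the output is an expectation.

        from sqlglot.expressions import Select

        q = Select().select("x", "z").from_("tbl").lateral("OUTER explode(y) tbl2 AS z")
        print(q.sql())
        # roughly: SELECT x, z FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z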
    @@ -25620,102 +25879,102 @@ Otherwise, this resets the expressions.
    -
    2772    def join(
    -2773        self,
    -2774        expression: ExpOrStr,
    -2775        on: t.Optional[ExpOrStr] = None,
    -2776        using: t.Optional[ExpOrStr | t.List[ExpOrStr]] = None,
    -2777        append: bool = True,
    -2778        join_type: t.Optional[str] = None,
    -2779        join_alias: t.Optional[Identifier | str] = None,
    -2780        dialect: DialectType = None,
    -2781        copy: bool = True,
    -2782        **opts,
    -2783    ) -> Select:
    -2784        """
    -2785        Append to or set the JOIN expressions.
    -2786
    -2787        Example:
    -2788            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()
    -2789            'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'
    -2790
    -2791            >>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()
    -2792            'SELECT 1 FROM a JOIN b USING (x, y, z)'
    -2793
    -2794            Use `join_type` to change the type of join:
    -2795
    -2796            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()
    -2797            'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'
    -2798
    -2799        Args:
    -2800            expression: the SQL code string to parse.
    -2801                If an `Expression` instance is passed, it will be used as-is.
    -2802            on: optionally specify the join "on" criteria as a SQL string.
    -2803                If an `Expression` instance is passed, it will be used as-is.
    -2804            using: optionally specify the join "using" criteria as a SQL string.
    -2805                If an `Expression` instance is passed, it will be used as-is.
    -2806            append: if `True`, add to any existing expressions.
    -2807                Otherwise, this resets the expressions.
    -2808            join_type: if set, alter the parsed join type.
    -2809            join_alias: an optional alias for the joined source.
    -2810            dialect: the dialect used to parse the input expressions.
    -2811            copy: if `False`, modify this expression instance in-place.
    -2812            opts: other options to use to parse the input expressions.
    -2813
    -2814        Returns:
    -2815            Select: the modified expression.
    -2816        """
    -2817        parse_args: t.Dict[str, t.Any] = {"dialect": dialect, **opts}
    -2818
    -2819        try:
    -2820            expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args)
    -2821        except ParseError:
    -2822            expression = maybe_parse(expression, into=(Join, Expression), **parse_args)
    -2823
    -2824        join = expression if isinstance(expression, Join) else Join(this=expression)
    -2825
    -2826        if isinstance(join.this, Select):
    -2827            join.this.replace(join.this.subquery())
    -2828
    -2829        if join_type:
    -2830            method: t.Optional[Token]
    -2831            side: t.Optional[Token]
    -2832            kind: t.Optional[Token]
    -2833
    -2834            method, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args)  # type: ignore
    -2835
    -2836            if method:
    -2837                join.set("method", method.text)
    -2838            if side:
    -2839                join.set("side", side.text)
    -2840            if kind:
    -2841                join.set("kind", kind.text)
    -2842
    -2843        if on:
    -2844            on = and_(*ensure_list(on), dialect=dialect, copy=copy, **opts)
    -2845            join.set("on", on)
    +            
    2783    def join(
    +2784        self,
    +2785        expression: ExpOrStr,
    +2786        on: t.Optional[ExpOrStr] = None,
    +2787        using: t.Optional[ExpOrStr | t.List[ExpOrStr]] = None,
    +2788        append: bool = True,
    +2789        join_type: t.Optional[str] = None,
    +2790        join_alias: t.Optional[Identifier | str] = None,
    +2791        dialect: DialectType = None,
    +2792        copy: bool = True,
    +2793        **opts,
    +2794    ) -> Select:
    +2795        """
    +2796        Append to or set the JOIN expressions.
    +2797
    +2798        Example:
    +2799            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()
    +2800            'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'
    +2801
    +2802            >>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()
    +2803            'SELECT 1 FROM a JOIN b USING (x, y, z)'
    +2804
    +2805            Use `join_type` to change the type of join:
    +2806
    +2807            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()
    +2808            'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'
    +2809
    +2810        Args:
    +2811            expression: the SQL code string to parse.
    +2812                If an `Expression` instance is passed, it will be used as-is.
    +2813            on: optionally specify the join "on" criteria as a SQL string.
    +2814                If an `Expression` instance is passed, it will be used as-is.
    +2815            using: optionally specify the join "using" criteria as a SQL string.
    +2816                If an `Expression` instance is passed, it will be used as-is.
    +2817            append: if `True`, add to any existing expressions.
    +2818                Otherwise, this resets the expressions.
    +2819            join_type: if set, alter the parsed join type.
    +2820            join_alias: an optional alias for the joined source.
    +2821            dialect: the dialect used to parse the input expressions.
    +2822            copy: if `False`, modify this expression instance in-place.
    +2823            opts: other options to use to parse the input expressions.
    +2824
    +2825        Returns:
    +2826            Select: the modified expression.
    +2827        """
    +2828        parse_args: t.Dict[str, t.Any] = {"dialect": dialect, **opts}
    +2829
    +2830        try:
    +2831            expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args)
    +2832        except ParseError:
    +2833            expression = maybe_parse(expression, into=(Join, Expression), **parse_args)
    +2834
    +2835        join = expression if isinstance(expression, Join) else Join(this=expression)
    +2836
    +2837        if isinstance(join.this, Select):
    +2838            join.this.replace(join.this.subquery())
    +2839
    +2840        if join_type:
    +2841            method: t.Optional[Token]
    +2842            side: t.Optional[Token]
    +2843            kind: t.Optional[Token]
    +2844
    +2845            method, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args)  # type: ignore
     2846
    -2847        if using:
    -2848            join = _apply_list_builder(
    -2849                *ensure_list(using),
    -2850                instance=join,
    -2851                arg="using",
    -2852                append=append,
    -2853                copy=copy,
    -2854                **opts,
    -2855            )
    -2856
    -2857        if join_alias:
    -2858            join.set("this", alias_(join.this, join_alias, table=True))
    -2859
    -2860        return _apply_list_builder(
    -2861            join,
    -2862            instance=self,
    -2863            arg="joins",
    -2864            append=append,
    -2865            copy=copy,
    -2866            **opts,
    -2867        )
    +2847            if method:
    +2848                join.set("method", method.text)
    +2849            if side:
    +2850                join.set("side", side.text)
    +2851            if kind:
    +2852                join.set("kind", kind.text)
    +2853
    +2854        if on:
    +2855            on = and_(*ensure_list(on), dialect=dialect, copy=copy, **opts)
    +2856            join.set("on", on)
    +2857
    +2858        if using:
    +2859            join = _apply_list_builder(
    +2860                *ensure_list(using),
    +2861                instance=join,
    +2862                arg="using",
    +2863                append=append,
    +2864                copy=copy,
    +2865                **opts,
    +2866            )
    +2867
    +2868        if join_alias:
    +2869            join.set("this", alias_(join.this, join_alias, table=True))
    +2870
    +2871        return _apply_list_builder(
    +2872            join,
    +2873            instance=self,
    +2874            arg="joins",
    +2875            append=append,
    +2876            copy=copy,
    +2877            **opts,
    +2878        )
     
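    The join builder also accepts a Select instance, which (per the code above) is wrapped in a subquery, and join_alias names the joined source. A hedged sketch; the tables, columns, and the exact formatting of the output are assumptions.

        from sqlglot.expressions import Select

        totals = Select().select("id", "SUM(amount) AS total").from_("orders").group_by("id")
        q = (
            Select()
            .select("u.name", "o.total")
            .from_("users AS u")
            .join(totals, on="u.id = o.id", join_type="left", join_alias="o")
        )
        print(q.sql())
        # roughly: SELECT u.name, o.total FROM users AS u
        #   LEFT JOIN (SELECT id, SUM(amount) AS total FROM orders GROUP BY id) AS o ON u.id = o.id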
    @@ -25783,44 +26042,44 @@ Otherwise, this resets the expressions.
    -
    2869    def where(
    -2870        self,
    -2871        *expressions: t.Optional[ExpOrStr],
    -2872        append: bool = True,
    -2873        dialect: DialectType = None,
    -2874        copy: bool = True,
    -2875        **opts,
    -2876    ) -> Select:
    -2877        """
    -2878        Append to or set the WHERE expressions.
    -2879
    -2880        Example:
    -2881            >>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()
    -2882            "SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"
    -2883
    -2884        Args:
    -2885            *expressions: the SQL code strings to parse.
    -2886                If an `Expression` instance is passed, it will be used as-is.
    -2887                Multiple expressions are combined with an AND operator.
    -2888            append: if `True`, AND the new expressions to any existing expression.
    -2889                Otherwise, this resets the expression.
    -2890            dialect: the dialect used to parse the input expressions.
    -2891            copy: if `False`, modify this expression instance in-place.
    -2892            opts: other options to use to parse the input expressions.
    -2893
    -2894        Returns:
    -2895            Select: the modified expression.
    -2896        """
    -2897        return _apply_conjunction_builder(
    -2898            *expressions,
    -2899            instance=self,
    -2900            arg="where",
    -2901            append=append,
    -2902            into=Where,
    -2903            dialect=dialect,
    -2904            copy=copy,
    -2905            **opts,
    -2906        )
    +            
    2880    def where(
    +2881        self,
    +2882        *expressions: t.Optional[ExpOrStr],
    +2883        append: bool = True,
    +2884        dialect: DialectType = None,
    +2885        copy: bool = True,
    +2886        **opts,
    +2887    ) -> Select:
    +2888        """
    +2889        Append to or set the WHERE expressions.
    +2890
    +2891        Example:
    +2892            >>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()
    +2893            "SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"
    +2894
    +2895        Args:
    +2896            *expressions: the SQL code strings to parse.
    +2897                If an `Expression` instance is passed, it will be used as-is.
    +2898                Multiple expressions are combined with an AND operator.
    +2899            append: if `True`, AND the new expressions to any existing expression.
    +2900                Otherwise, this resets the expression.
    +2901            dialect: the dialect used to parse the input expressions.
    +2902            copy: if `False`, modify this expression instance in-place.
    +2903            opts: other options to use to parse the input expressions.
    +2904
    +2905        Returns:
    +2906            Select: the modified expression.
    +2907        """
    +2908        return _apply_conjunction_builder(
    +2909            *expressions,
    +2910            instance=self,
    +2911            arg="where",
    +2912            append=append,
    +2913            into=Where,
    +2914            dialect=dialect,
    +2915            copy=copy,
    +2916            **opts,
    +2917        )
     
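    A sketch of passing several predicates to where, which the builder combines with AND; the names are invented and the commented output is approximate, since the generator may parenthesize differently.

        from sqlglot.expressions import Select

        q = Select().select("x").from_("tbl").where("x > 1", "y < 10")
        q = q.where("z = 'a'")   # append=True (default) ANDs onto the existing WHERE
        print(q.sql())
        # roughly: SELECT x FROM tbl WHERE (x > 1 AND y < 10) AND z = 'a'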
    @@ -25869,44 +26128,44 @@ Otherwise, this resets the expression.
    -
    2908    def having(
    -2909        self,
    -2910        *expressions: t.Optional[ExpOrStr],
    -2911        append: bool = True,
    -2912        dialect: DialectType = None,
    -2913        copy: bool = True,
    -2914        **opts,
    -2915    ) -> Select:
    -2916        """
    -2917        Append to or set the HAVING expressions.
    -2918
    -2919        Example:
    -2920            >>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()
    -2921            'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'
    -2922
    -2923        Args:
    -2924            *expressions: the SQL code strings to parse.
    -2925                If an `Expression` instance is passed, it will be used as-is.
    -2926                Multiple expressions are combined with an AND operator.
    -2927            append: if `True`, AND the new expressions to any existing expression.
    -2928                Otherwise, this resets the expression.
    -2929            dialect: the dialect used to parse the input expressions.
    -2930            copy: if `False`, modify this expression instance in-place.
    -2931            opts: other options to use to parse the input expressions.
    -2932
    -2933        Returns:
    -2934            The modified Select expression.
    -2935        """
    -2936        return _apply_conjunction_builder(
    -2937            *expressions,
    -2938            instance=self,
    -2939            arg="having",
    -2940            append=append,
    -2941            into=Having,
    -2942            dialect=dialect,
    -2943            copy=copy,
    -2944            **opts,
    -2945        )
    +            
    2919    def having(
    +2920        self,
    +2921        *expressions: t.Optional[ExpOrStr],
    +2922        append: bool = True,
    +2923        dialect: DialectType = None,
    +2924        copy: bool = True,
    +2925        **opts,
    +2926    ) -> Select:
    +2927        """
    +2928        Append to or set the HAVING expressions.
    +2929
    +2930        Example:
    +2931            >>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()
    +2932            'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'
    +2933
    +2934        Args:
    +2935            *expressions: the SQL code strings to parse.
    +2936                If an `Expression` instance is passed, it will be used as-is.
    +2937                Multiple expressions are combined with an AND operator.
    +2938            append: if `True`, AND the new expressions to any existing expression.
    +2939                Otherwise, this resets the expression.
    +2940            dialect: the dialect used to parse the input expressions.
    +2941            copy: if `False`, modify this expression instance in-place.
    +2942            opts: other options to use to parse the input expressions.
    +2943
    +2944        Returns:
    +2945            The modified Select expression.
    +2946        """
    +2947        return _apply_conjunction_builder(
    +2948            *expressions,
    +2949            instance=self,
    +2950            arg="having",
    +2951            append=append,
    +2952            into=Having,
    +2953            dialect=dialect,
    +2954            copy=copy,
    +2955            **opts,
    +2956        )
     
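    A sketch combining group_by and having into one chained build; it only adds an alias to the doctest shown above, the rest follows it directly.

        from sqlglot.expressions import Select

        q = (
            Select()
            .select("x", "COUNT(y) AS cnt")
            .from_("tbl")
            .group_by("x")
            .having("COUNT(y) > 3")
        )
        print(q.sql())
        # roughly: SELECT x, COUNT(y) AS cnt FROM tbl GROUP BY x HAVING COUNT(y) > 3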
    @@ -25955,24 +26214,24 @@ Otherwise, this resets the expression.
    -
    2947    def window(
    -2948        self,
    -2949        *expressions: t.Optional[ExpOrStr],
    -2950        append: bool = True,
    -2951        dialect: DialectType = None,
    -2952        copy: bool = True,
    -2953        **opts,
    -2954    ) -> Select:
    -2955        return _apply_list_builder(
    -2956            *expressions,
    -2957            instance=self,
    -2958            arg="windows",
    -2959            append=append,
    -2960            into=Window,
    -2961            dialect=dialect,
    -2962            copy=copy,
    -2963            **opts,
    -2964        )
    +            
    2958    def window(
    +2959        self,
    +2960        *expressions: t.Optional[ExpOrStr],
    +2961        append: bool = True,
    +2962        dialect: DialectType = None,
    +2963        copy: bool = True,
    +2964        **opts,
    +2965    ) -> Select:
    +2966        return _apply_list_builder(
    +2967            *expressions,
    +2968            instance=self,
    +2969            arg="windows",
    +2970            append=append,
    +2971            into=Window,
    +2972            dialect=dialect,
    +2973            copy=copy,
    +2974            **opts,
    +2975        )
     
    @@ -25990,24 +26249,24 @@ Otherwise, this resets the expression.
    -
    2966    def qualify(
    -2967        self,
    -2968        *expressions: t.Optional[ExpOrStr],
    -2969        append: bool = True,
    -2970        dialect: DialectType = None,
    -2971        copy: bool = True,
    -2972        **opts,
    -2973    ) -> Select:
    -2974        return _apply_conjunction_builder(
    -2975            *expressions,
    -2976            instance=self,
    -2977            arg="qualify",
    -2978            append=append,
    -2979            into=Qualify,
    -2980            dialect=dialect,
    -2981            copy=copy,
    -2982            **opts,
    -2983        )
    +            
    2977    def qualify(
    +2978        self,
    +2979        *expressions: t.Optional[ExpOrStr],
    +2980        append: bool = True,
    +2981        dialect: DialectType = None,
    +2982        copy: bool = True,
    +2983        **opts,
    +2984    ) -> Select:
    +2985        return _apply_conjunction_builder(
    +2986            *expressions,
    +2987            instance=self,
    +2988            arg="qualify",
    +2989            append=append,
    +2990            into=Qualify,
    +2991            dialect=dialect,
    +2992            copy=copy,
    +2993            **opts,
    +2994        )
     
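    window and qualify carry no docstrings in this version; as the code above shows, they are the list and conjunction counterparts for the WINDOW and QUALIFY clauses. A hedged sketch of qualify only, with invented column names and an expected rather than verified output.

        from sqlglot.expressions import Select

        q = Select().select("*").from_("tbl").qualify("ROW_NUMBER() OVER (PARTITION BY x ORDER BY y) = 1")
        print(q.sql(dialect="snowflake"))
        # roughly: SELECT * FROM tbl QUALIFY ROW_NUMBER() OVER (PARTITION BY x ORDER BY y) = 1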
    @@ -26025,28 +26284,28 @@ Otherwise, this resets the expression.
    -
    2985    def distinct(
    -2986        self, *ons: t.Optional[ExpOrStr], distinct: bool = True, copy: bool = True
    -2987    ) -> Select:
    -2988        """
    -2989        Set the OFFSET expression.
    -2990
    -2991        Example:
    -2992            >>> Select().from_("tbl").select("x").distinct().sql()
    -2993            'SELECT DISTINCT x FROM tbl'
    -2994
    -2995        Args:
    -2996            ons: the expressions to distinct on
    -2997            distinct: whether the Select should be distinct
    -2998            copy: if `False`, modify this expression instance in-place.
    -2999
    -3000        Returns:
    -3001            Select: the modified expression.
    -3002        """
    -3003        instance = _maybe_copy(self, copy)
    -3004        on = Tuple(expressions=[maybe_parse(on, copy=copy) for on in ons if on]) if ons else None
    -3005        instance.set("distinct", Distinct(on=on) if distinct else None)
    -3006        return instance
    +            
    2996    def distinct(
    +2997        self, *ons: t.Optional[ExpOrStr], distinct: bool = True, copy: bool = True
    +2998    ) -> Select:
    +2999        """
    +3000        Set the DISTINCT expression.
    +3001
    +3002        Example:
    +3003            >>> Select().from_("tbl").select("x").distinct().sql()
    +3004            'SELECT DISTINCT x FROM tbl'
    +3005
    +3006        Args:
    +3007            ons: the expressions to distinct on
    +3008            distinct: whether the Select should be distinct
    +3009            copy: if `False`, modify this expression instance in-place.
    +3010
    +3011        Returns:
    +3012            Select: the modified expression.
    +3013        """
    +3014        instance = _maybe_copy(self, copy)
    +3015        on = Tuple(expressions=[maybe_parse(on, copy=copy) for on in ons if on]) if ons else None
    +3016        instance.set("distinct", Distinct(on=on) if distinct else None)
    +3017        return instance
     
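    Passing expressions to distinct builds a DISTINCT ON clause, as the Tuple construction above shows. A sketch for a dialect that supports it; the names and the commented output are assumptions.

        from sqlglot.expressions import Select

        q = Select().select("x", "y").from_("tbl").distinct("x")
        print(q.sql(dialect="postgres"))
        # roughly: SELECT DISTINCT ON (x) x, y FROM tbl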
    @@ -26090,49 +26349,49 @@ Otherwise, this resets the expression.
    -
    3008    def ctas(
    -3009        self,
    -3010        table: ExpOrStr,
    -3011        properties: t.Optional[t.Dict] = None,
    -3012        dialect: DialectType = None,
    -3013        copy: bool = True,
    -3014        **opts,
    -3015    ) -> Create:
    -3016        """
    -3017        Convert this expression to a CREATE TABLE AS statement.
    -3018
    -3019        Example:
    -3020            >>> Select().select("*").from_("tbl").ctas("x").sql()
    -3021            'CREATE TABLE x AS SELECT * FROM tbl'
    -3022
    -3023        Args:
    -3024            table: the SQL code string to parse as the table name.
    -3025                If another `Expression` instance is passed, it will be used as-is.
    -3026            properties: an optional mapping of table properties
    -3027            dialect: the dialect used to parse the input table.
    -3028            copy: if `False`, modify this expression instance in-place.
    -3029            opts: other options to use to parse the input table.
    -3030
    -3031        Returns:
    -3032            The new Create expression.
    -3033        """
    -3034        instance = _maybe_copy(self, copy)
    -3035        table_expression = maybe_parse(
    -3036            table,
    -3037            into=Table,
    -3038            dialect=dialect,
    -3039            **opts,
    -3040        )
    -3041        properties_expression = None
    -3042        if properties:
    -3043            properties_expression = Properties.from_dict(properties)
    -3044
    -3045        return Create(
    -3046            this=table_expression,
    -3047            kind="table",
    -3048            expression=instance,
    -3049            properties=properties_expression,
    -3050        )
    +            
    3019    def ctas(
    +3020        self,
    +3021        table: ExpOrStr,
    +3022        properties: t.Optional[t.Dict] = None,
    +3023        dialect: DialectType = None,
    +3024        copy: bool = True,
    +3025        **opts,
    +3026    ) -> Create:
    +3027        """
    +3028        Convert this expression to a CREATE TABLE AS statement.
    +3029
    +3030        Example:
    +3031            >>> Select().select("*").from_("tbl").ctas("x").sql()
    +3032            'CREATE TABLE x AS SELECT * FROM tbl'
    +3033
    +3034        Args:
    +3035            table: the SQL code string to parse as the table name.
    +3036                If another `Expression` instance is passed, it will be used as-is.
    +3037            properties: an optional mapping of table properties
    +3038            dialect: the dialect used to parse the input table.
    +3039            copy: if `False`, modify this expression instance in-place.
    +3040            opts: other options to use to parse the input table.
    +3041
    +3042        Returns:
    +3043            The new Create expression.
    +3044        """
    +3045        instance = _maybe_copy(self, copy)
    +3046        table_expression = maybe_parse(
    +3047            table,
    +3048            into=Table,
    +3049            dialect=dialect,
    +3050            **opts,
    +3051        )
    +3052        properties_expression = None
    +3053        if properties:
    +3054            properties_expression = Properties.from_dict(properties)
    +3055
    +3056        return Create(
    +3057            this=table_expression,
    +3058            kind="table",
    +3059            expression=instance,
    +3060            properties=properties_expression,
    +3061        )
     
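    As a hedged sketch of the `properties` mapping described above (the exact property rendering depends on the target dialect, so only the statement prefix is asserted; the table name is illustrative):

    >>> from sqlglot.expressions import Select
    >>> create = Select().select("*").from_("tbl").ctas("db.x", properties={"FORMAT": "parquet"})
    >>> create.sql().startswith("CREATE TABLE db.x")
    True
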
    @@ -26179,29 +26438,28 @@ If another Expression instance is passed,
    -
    3052    def lock(self, update: bool = True, copy: bool = True) -> Select:
    -3053        """
    -3054        Set the locking read mode for this expression.
    -3055
    -3056        Examples:
    -3057            >>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")
    -3058            "SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"
    -3059
    -3060            >>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")
    -3061            "SELECT x FROM tbl WHERE x = 'a' FOR SHARE"
    -3062
    -3063        Args:
    -3064            update: if `True`, the locking type will be `FOR UPDATE`, else it will be `FOR SHARE`.
    -3065            copy: if `False`, modify this expression instance in-place.
    +            
    3063    def lock(self, update: bool = True, copy: bool = True) -> Select:
    +3064        """
    +3065        Set the locking read mode for this expression.
     3066
    -3067        Returns:
    -3068            The modified expression.
    -3069        """
    +3067        Examples:
    +3068            >>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")
    +3069            "SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"
     3070
    -3071        inst = _maybe_copy(self, copy)
    -3072        inst.set("locks", [Lock(update=update)])
    +3071            >>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")
    +3072            "SELECT x FROM tbl WHERE x = 'a' FOR SHARE"
     3073
    -3074        return inst
    +3074        Args:
    +3075            update: if `True`, the locking type will be `FOR UPDATE`, else it will be `FOR SHARE`.
    +3076            copy: if `False`, modify this expression instance in-place.
    +3077
    +3078        Returns:
    +3079            The modified expression.
    +3080        """
    +3081        inst = _maybe_copy(self, copy)
    +3082        inst.set("locks", [Lock(update=update)])
    +3083
    +3084        return inst
     
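    Since `copy=False` mutates the receiver, here is a short doctest-style sketch of in-place use with the MySQL generator shown in the examples above:

    >>> from sqlglot.expressions import Select
    >>> q = Select().select("x").from_("tbl")
    >>> _ = q.lock(copy=False)
    >>> q.sql("mysql")
    'SELECT x FROM tbl FOR UPDATE'
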
    @@ -26232,6 +26490,73 @@ If another Expression instance is passed,
    Returns:
    +
    +

    The modified expression.

    +
    + def hint(self, *hints: Union[str, sqlglot.expressions.Expression], dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, copy: bool = True) -> sqlglot.expressions.Select:
    +
    3086    def hint(self, *hints: ExpOrStr, dialect: DialectType = None, copy: bool = True) -> Select:
    +3087        """
    +3088        Set hints for this expression.
    +3089
    +3090        Examples:
    +3091            >>> Select().select("x").from_("tbl").hint("BROADCAST(y)").sql(dialect="spark")
    +3092            'SELECT /*+ BROADCAST(y) */ x FROM tbl'
    +3093
    +3094        Args:
    +3095            hints: The SQL code strings to parse as the hints.
    +3096                If an `Expression` instance is passed, it will be used as-is.
    +3097            dialect: The dialect used to parse the hints.
    +3098            copy: If `False`, modify this expression instance in-place.
    +3099
    +3100        Returns:
    +3101            The modified expression.
    +3102        """
    +3103        inst = _maybe_copy(self, copy)
    +3104        inst.set(
    +3105            "hint", Hint(expressions=[maybe_parse(h, copy=copy, dialect=dialect) for h in hints])
    +3106        )
    +3107
    +3108        return inst
    +

    Set hints for this expression.

    Examples:

    >>> Select().select("x").from_("tbl").hint("BROADCAST(y)").sql(dialect="spark")
    +'SELECT /*+ BROADCAST(y) */ x FROM tbl'

    Arguments:

    • hints: The SQL code strings to parse as the hints.
      If an Expression instance is passed, it will be used as-is.
    • dialect: The dialect used to parse the hints.
    • copy: If False, modify this expression instance in-place.

    Returns:

    The modified expression.

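    Hints set this way round-trip with hints picked up during parsing; a small sketch using the Spark dialect from the example above (other dialects are not asserted):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT /*+ BROADCAST(y) */ x FROM tbl", read="spark", write="spark")[0]
    'SELECT /*+ BROADCAST(y) */ x FROM tbl'
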
    @@ -26320,30 +26645,30 @@ If another Expression instance is passed,
    -
    3089class Subquery(DerivedTable, Unionable):
    -3090    arg_types = {
    -3091        "this": True,
    -3092        "alias": False,
    -3093        "with": False,
    -3094        **QUERY_MODIFIERS,
    -3095    }
    -3096
    -3097    def unnest(self):
    -3098        """
    -3099        Returns the first non subquery.
    -3100        """
    -3101        expression = self
    -3102        while isinstance(expression, Subquery):
    -3103            expression = expression.this
    -3104        return expression
    -3105
    -3106    @property
    -3107    def is_star(self) -> bool:
    -3108        return self.this.is_star
    -3109
    -3110    @property
    -3111    def output_name(self) -> str:
    -3112        return self.alias
    +            
    3123class Subquery(DerivedTable, Unionable):
    +3124    arg_types = {
    +3125        "this": True,
    +3126        "alias": False,
    +3127        "with": False,
    +3128        **QUERY_MODIFIERS,
    +3129    }
    +3130
    +3131    def unnest(self):
    +3132        """
    +3133        Returns the first non subquery.
    +3134        """
    +3135        expression = self
    +3136        while isinstance(expression, Subquery):
    +3137            expression = expression.this
    +3138        return expression
    +3139
    +3140    @property
    +3141    def is_star(self) -> bool:
    +3142        return self.this.is_star
    +3143
    +3144    @property
    +3145    def output_name(self) -> str:
    +3146        return self.alias
     
    @@ -26360,14 +26685,14 @@ If another Expression instance is passed,
    -
    3097    def unnest(self):
    -3098        """
    -3099        Returns the first non subquery.
    -3100        """
    -3101        expression = self
    -3102        while isinstance(expression, Subquery):
    -3103            expression = expression.this
    -3104        return expression
    +            
    3131    def unnest(self):
    +3132        """
    +3133        Returns the first non subquery.
    +3134        """
    +3135        expression = self
    +3136        while isinstance(expression, Subquery):
    +3137            expression = expression.this
    +3138        return expression
     
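    A minimal sketch of `unnest` peeling nested subqueries down to the first non-Subquery node (the expressions are illustrative):

    >>> from sqlglot import exp
    >>> inner = exp.select("1")
    >>> nested = exp.Subquery(this=exp.Subquery(this=inner))
    >>> nested.unnest() is inner
    True
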
    @@ -26480,19 +26805,19 @@ If another Expression instance is passed,
    -
    3115class TableSample(Expression):
    -3116    arg_types = {
    -3117        "this": False,
    -3118        "method": False,
    -3119        "bucket_numerator": False,
    -3120        "bucket_denominator": False,
    -3121        "bucket_field": False,
    -3122        "percent": False,
    -3123        "rows": False,
    -3124        "size": False,
    -3125        "seed": False,
    -3126        "kind": False,
    -3127    }
    +            
    3149class TableSample(Expression):
    +3150    arg_types = {
    +3151        "this": False,
    +3152        "method": False,
    +3153        "bucket_numerator": False,
    +3154        "bucket_denominator": False,
    +3155        "bucket_field": False,
    +3156        "percent": False,
    +3157        "rows": False,
    +3158        "size": False,
    +3159        "seed": False,
    +3160        "kind": False,
    +3161    }
     
    @@ -26556,14 +26881,14 @@ If another Expression instance is passed,
    -
    3130class Tag(Expression):
    -3131    """Tags are used for generating arbitrary sql like SELECT <span>x</span>."""
    -3132
    -3133    arg_types = {
    -3134        "this": False,
    -3135        "prefix": False,
    -3136        "postfix": False,
    -3137    }
    +            
    3164class Tag(Expression):
    +3165    """Tags are used for generating arbitrary sql like SELECT <span>x</span>."""
    +3166
    +3167    arg_types = {
    +3168        "this": False,
    +3169        "prefix": False,
    +3170        "postfix": False,
    +3171    }
     
    @@ -26629,17 +26954,17 @@ If another Expression instance is passed,
    -
    3142class Pivot(Expression):
    -3143    arg_types = {
    -3144        "this": False,
    -3145        "alias": False,
    -3146        "expressions": True,
    -3147        "field": False,
    -3148        "unpivot": False,
    -3149        "using": False,
    -3150        "group": False,
    -3151        "columns": False,
    -3152    }
    +            
    3176class Pivot(Expression):
    +3177    arg_types = {
    +3178        "this": False,
    +3179        "alias": False,
    +3180        "expressions": True,
    +3181        "field": False,
    +3182        "unpivot": False,
    +3183        "using": False,
    +3184        "group": False,
    +3185        "columns": False,
    +3186    }
     
    @@ -26703,16 +27028,16 @@ If another Expression instance is passed,
    -
    3155class Window(Expression):
    -3156    arg_types = {
    -3157        "this": True,
    -3158        "partition_by": False,
    -3159        "order": False,
    -3160        "spec": False,
    -3161        "alias": False,
    -3162        "over": False,
    -3163        "first": False,
    -3164    }
    +            
    3189class Window(Expression):
    +3190    arg_types = {
    +3191        "this": True,
    +3192        "partition_by": False,
    +3193        "order": False,
    +3194        "spec": False,
    +3195        "alias": False,
    +3196        "over": False,
    +3197        "first": False,
    +3198    }
     
    @@ -26776,14 +27101,14 @@ If another Expression instance is passed,
    -
    3167class WindowSpec(Expression):
    -3168    arg_types = {
    -3169        "kind": False,
    -3170        "start": False,
    -3171        "start_side": False,
    -3172        "end": False,
    -3173        "end_side": False,
    -3174    }
    +            
    3201class WindowSpec(Expression):
    +3202    arg_types = {
    +3203        "kind": False,
    +3204        "start": False,
    +3205        "start_side": False,
    +3206        "end": False,
    +3207        "end_side": False,
    +3208    }
     
    @@ -26847,8 +27172,8 @@ If another Expression instance is passed,
    -
    3177class Where(Expression):
    -3178    pass
    +            
    3211class Where(Expression):
    +3212    pass
     
    @@ -26912,16 +27237,16 @@ If another Expression instance is passed,
    -
    3181class Star(Expression):
    -3182    arg_types = {"except": False, "replace": False}
    -3183
    -3184    @property
    -3185    def name(self) -> str:
    -3186        return "*"
    -3187
    -3188    @property
    -3189    def output_name(self) -> str:
    -3190        return self.name
    +            
    3215class Star(Expression):
    +3216    arg_types = {"except": False, "replace": False}
    +3217
    +3218    @property
    +3219    def name(self) -> str:
    +3220        return "*"
    +3221
    +3222    @property
    +3223    def output_name(self) -> str:
    +3224        return self.name
     
    @@ -27014,8 +27339,8 @@ If another Expression instance is passed,
    -
    3193class Parameter(Expression):
    -3194    arg_types = {"this": True, "wrapped": False}
    +            
    3227class Parameter(Expression):
    +3228    arg_types = {"this": True, "wrapped": False}
     
    @@ -27079,8 +27404,8 @@ If another Expression instance is passed,
    -
    3197class SessionParameter(Expression):
    -3198    arg_types = {"this": True, "kind": False}
    +            
    3231class SessionParameter(Expression):
    +3232    arg_types = {"this": True, "kind": False}
     
    @@ -27144,8 +27469,8 @@ If another Expression instance is passed,
    -
    3201class Placeholder(Expression):
    -3202    arg_types = {"this": False, "kind": False}
    +            
    3235class Placeholder(Expression):
    +3236    arg_types = {"this": False, "kind": False}
     
    @@ -27209,12 +27534,12 @@ If another Expression instance is passed,
    -
    3205class Null(Condition):
    -3206    arg_types: t.Dict[str, t.Any] = {}
    -3207
    -3208    @property
    -3209    def name(self) -> str:
    -3210        return "NULL"
    +            
    3239class Null(Condition):
    +3240    arg_types: t.Dict[str, t.Any] = {}
    +3241
    +3242    @property
    +3243    def name(self) -> str:
    +3244        return "NULL"
     
    @@ -27293,8 +27618,8 @@ If another Expression instance is passed,
    -
    3213class Boolean(Condition):
    -3214    pass
    +            
    3247class Boolean(Condition):
    +3248    pass
     
    @@ -27373,8 +27698,8 @@ If another Expression instance is passed,
    -
    3217class DataTypeSize(Expression):
    -3218    arg_types = {"this": True, "expression": False}
    +            
    3251class DataTypeSize(Expression):
    +3252    arg_types = {"this": True, "expression": False}
     
    @@ -27438,150 +27763,153 @@ If another Expression instance is passed,
    -
    3221class DataType(Expression):
    -3222    arg_types = {
    -3223        "this": True,
    -3224        "expressions": False,
    -3225        "nested": False,
    -3226        "values": False,
    -3227        "prefix": False,
    -3228    }
    -3229
    -3230    class Type(AutoName):
    -3231        ARRAY = auto()
    -3232        BIGDECIMAL = auto()
    -3233        BIGINT = auto()
    -3234        BIGSERIAL = auto()
    -3235        BINARY = auto()
    -3236        BIT = auto()
    -3237        BOOLEAN = auto()
    -3238        CHAR = auto()
    -3239        DATE = auto()
    -3240        DATETIME = auto()
    -3241        DATETIME64 = auto()
    -3242        INT4RANGE = auto()
    -3243        INT4MULTIRANGE = auto()
    -3244        INT8RANGE = auto()
    -3245        INT8MULTIRANGE = auto()
    -3246        NUMRANGE = auto()
    -3247        NUMMULTIRANGE = auto()
    -3248        TSRANGE = auto()
    -3249        TSMULTIRANGE = auto()
    -3250        TSTZRANGE = auto()
    -3251        TSTZMULTIRANGE = auto()
    -3252        DATERANGE = auto()
    -3253        DATEMULTIRANGE = auto()
    -3254        DECIMAL = auto()
    -3255        DOUBLE = auto()
    -3256        FLOAT = auto()
    -3257        GEOGRAPHY = auto()
    -3258        GEOMETRY = auto()
    -3259        HLLSKETCH = auto()
    -3260        HSTORE = auto()
    -3261        IMAGE = auto()
    -3262        INET = auto()
    -3263        INT = auto()
    -3264        INT128 = auto()
    -3265        INT256 = auto()
    -3266        INTERVAL = auto()
    -3267        JSON = auto()
    -3268        JSONB = auto()
    -3269        LONGBLOB = auto()
    -3270        LONGTEXT = auto()
    -3271        MAP = auto()
    -3272        MEDIUMBLOB = auto()
    -3273        MEDIUMTEXT = auto()
    -3274        MONEY = auto()
    -3275        NCHAR = auto()
    -3276        NULL = auto()
    -3277        NULLABLE = auto()
    -3278        NVARCHAR = auto()
    -3279        OBJECT = auto()
    -3280        ROWVERSION = auto()
    -3281        SERIAL = auto()
    -3282        SMALLINT = auto()
    -3283        SMALLMONEY = auto()
    -3284        SMALLSERIAL = auto()
    -3285        STRUCT = auto()
    -3286        SUPER = auto()
    -3287        TEXT = auto()
    -3288        TIME = auto()
    -3289        TIMESTAMP = auto()
    -3290        TIMESTAMPTZ = auto()
    -3291        TIMESTAMPLTZ = auto()
    -3292        TINYINT = auto()
    -3293        UBIGINT = auto()
    -3294        UINT = auto()
    -3295        USMALLINT = auto()
    -3296        UTINYINT = auto()
    -3297        UNKNOWN = auto()  # Sentinel value, useful for type annotation
    -3298        UINT128 = auto()
    -3299        UINT256 = auto()
    -3300        UNIQUEIDENTIFIER = auto()
    -3301        UUID = auto()
    -3302        VARBINARY = auto()
    -3303        VARCHAR = auto()
    -3304        VARIANT = auto()
    -3305        XML = auto()
    -3306
    -3307    TEXT_TYPES = {
    -3308        Type.CHAR,
    -3309        Type.NCHAR,
    -3310        Type.VARCHAR,
    -3311        Type.NVARCHAR,
    -3312        Type.TEXT,
    -3313    }
    -3314
    -3315    INTEGER_TYPES = {
    -3316        Type.INT,
    -3317        Type.TINYINT,
    -3318        Type.SMALLINT,
    -3319        Type.BIGINT,
    -3320        Type.INT128,
    -3321        Type.INT256,
    -3322    }
    -3323
    -3324    FLOAT_TYPES = {
    -3325        Type.FLOAT,
    -3326        Type.DOUBLE,
    -3327    }
    -3328
    -3329    NUMERIC_TYPES = {*INTEGER_TYPES, *FLOAT_TYPES}
    -3330
    -3331    TEMPORAL_TYPES = {
    -3332        Type.TIMESTAMP,
    -3333        Type.TIMESTAMPTZ,
    -3334        Type.TIMESTAMPLTZ,
    -3335        Type.DATE,
    -3336        Type.DATETIME,
    -3337        Type.DATETIME64,
    -3338    }
    -3339
    -3340    @classmethod
    -3341    def build(
    -3342        cls, dtype: str | DataType | DataType.Type, dialect: DialectType = None, **kwargs
    -3343    ) -> DataType:
    -3344        from sqlglot import parse_one
    -3345
    -3346        if isinstance(dtype, str):
    -3347            if dtype.upper() in cls.Type.__members__:
    -3348                data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type[dtype.upper()])
    -3349            else:
    -3350                data_type_exp = parse_one(dtype, read=dialect, into=DataType)
    -3351
    -3352            if data_type_exp is None:
    -3353                raise ValueError(f"Unparsable data type value: {dtype}")
    -3354        elif isinstance(dtype, DataType.Type):
    -3355            data_type_exp = DataType(this=dtype)
    -3356        elif isinstance(dtype, DataType):
    -3357            return dtype
    -3358        else:
    -3359            raise ValueError(f"Invalid data type: {type(dtype)}. Expected str or DataType.Type")
    -3360
    -3361        return DataType(**{**data_type_exp.args, **kwargs})
    -3362
    -3363    def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool:
    -3364        return any(self.this == DataType.build(dtype).this for dtype in dtypes)
    +            
    3255class DataType(Expression):
    +3256    arg_types = {
    +3257        "this": True,
    +3258        "expressions": False,
    +3259        "nested": False,
    +3260        "values": False,
    +3261        "prefix": False,
    +3262    }
    +3263
    +3264    class Type(AutoName):
    +3265        ARRAY = auto()
    +3266        BIGDECIMAL = auto()
    +3267        BIGINT = auto()
    +3268        BIGSERIAL = auto()
    +3269        BINARY = auto()
    +3270        BIT = auto()
    +3271        BOOLEAN = auto()
    +3272        CHAR = auto()
    +3273        DATE = auto()
    +3274        DATETIME = auto()
    +3275        DATETIME64 = auto()
    +3276        ENUM = auto()
    +3277        INT4RANGE = auto()
    +3278        INT4MULTIRANGE = auto()
    +3279        INT8RANGE = auto()
    +3280        INT8MULTIRANGE = auto()
    +3281        NUMRANGE = auto()
    +3282        NUMMULTIRANGE = auto()
    +3283        TSRANGE = auto()
    +3284        TSMULTIRANGE = auto()
    +3285        TSTZRANGE = auto()
    +3286        TSTZMULTIRANGE = auto()
    +3287        DATERANGE = auto()
    +3288        DATEMULTIRANGE = auto()
    +3289        DECIMAL = auto()
    +3290        DOUBLE = auto()
    +3291        FLOAT = auto()
    +3292        GEOGRAPHY = auto()
    +3293        GEOMETRY = auto()
    +3294        HLLSKETCH = auto()
    +3295        HSTORE = auto()
    +3296        IMAGE = auto()
    +3297        INET = auto()
    +3298        INT = auto()
    +3299        INT128 = auto()
    +3300        INT256 = auto()
    +3301        INTERVAL = auto()
    +3302        JSON = auto()
    +3303        JSONB = auto()
    +3304        LONGBLOB = auto()
    +3305        LONGTEXT = auto()
    +3306        MAP = auto()
    +3307        MEDIUMBLOB = auto()
    +3308        MEDIUMTEXT = auto()
    +3309        MONEY = auto()
    +3310        NCHAR = auto()
    +3311        NULL = auto()
    +3312        NULLABLE = auto()
    +3313        NVARCHAR = auto()
    +3314        OBJECT = auto()
    +3315        ROWVERSION = auto()
    +3316        SERIAL = auto()
    +3317        SET = auto()
    +3318        SMALLINT = auto()
    +3319        SMALLMONEY = auto()
    +3320        SMALLSERIAL = auto()
    +3321        STRUCT = auto()
    +3322        SUPER = auto()
    +3323        TEXT = auto()
    +3324        TIME = auto()
    +3325        TIMESTAMP = auto()
    +3326        TIMESTAMPTZ = auto()
    +3327        TIMESTAMPLTZ = auto()
    +3328        TINYINT = auto()
    +3329        UBIGINT = auto()
    +3330        UINT = auto()
    +3331        USMALLINT = auto()
    +3332        UTINYINT = auto()
    +3333        UNKNOWN = auto()  # Sentinel value, useful for type annotation
    +3334        UINT128 = auto()
    +3335        UINT256 = auto()
    +3336        UNIQUEIDENTIFIER = auto()
    +3337        UUID = auto()
    +3338        VARBINARY = auto()
    +3339        VARCHAR = auto()
    +3340        VARIANT = auto()
    +3341        XML = auto()
    +3342
    +3343    TEXT_TYPES = {
    +3344        Type.CHAR,
    +3345        Type.NCHAR,
    +3346        Type.VARCHAR,
    +3347        Type.NVARCHAR,
    +3348        Type.TEXT,
    +3349    }
    +3350
    +3351    INTEGER_TYPES = {
    +3352        Type.INT,
    +3353        Type.TINYINT,
    +3354        Type.SMALLINT,
    +3355        Type.BIGINT,
    +3356        Type.INT128,
    +3357        Type.INT256,
    +3358    }
    +3359
    +3360    FLOAT_TYPES = {
    +3361        Type.FLOAT,
    +3362        Type.DOUBLE,
    +3363    }
    +3364
    +3365    NUMERIC_TYPES = {*INTEGER_TYPES, *FLOAT_TYPES}
    +3366
    +3367    TEMPORAL_TYPES = {
    +3368        Type.TIME,
    +3369        Type.TIMESTAMP,
    +3370        Type.TIMESTAMPTZ,
    +3371        Type.TIMESTAMPLTZ,
    +3372        Type.DATE,
    +3373        Type.DATETIME,
    +3374        Type.DATETIME64,
    +3375    }
    +3376
    +3377    @classmethod
    +3378    def build(
    +3379        cls, dtype: str | DataType | DataType.Type, dialect: DialectType = None, **kwargs
    +3380    ) -> DataType:
    +3381        from sqlglot import parse_one
    +3382
    +3383        if isinstance(dtype, str):
    +3384            if dtype.upper() == "UNKNOWN":
    +3385                data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type.UNKNOWN)
    +3386            else:
    +3387                data_type_exp = parse_one(dtype, read=dialect, into=DataType)
    +3388
    +3389            if data_type_exp is None:
    +3390                raise ValueError(f"Unparsable data type value: {dtype}")
    +3391        elif isinstance(dtype, DataType.Type):
    +3392            data_type_exp = DataType(this=dtype)
    +3393        elif isinstance(dtype, DataType):
    +3394            return dtype
    +3395        else:
    +3396            raise ValueError(f"Invalid data type: {type(dtype)}. Expected str or DataType.Type")
    +3397
    +3398        return DataType(**{**data_type_exp.args, **kwargs})
    +3399
    +3400    def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool:
    +3401        return any(self.this == DataType.build(dtype).this for dtype in dtypes)
     
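    The revised `build` above only special-cases the string "UNKNOWN"; every other string goes through the parser. A doctest-style sketch of both paths:

    >>> from sqlglot.expressions import DataType
    >>> DataType.build("varchar(100)").sql()
    'VARCHAR(100)'
    >>> DataType.build("unknown").this
    <Type.UNKNOWN: 'UNKNOWN'>
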
    @@ -27599,28 +27927,28 @@ If another Expression instance is passed,
    -
    3340    @classmethod
    -3341    def build(
    -3342        cls, dtype: str | DataType | DataType.Type, dialect: DialectType = None, **kwargs
    -3343    ) -> DataType:
    -3344        from sqlglot import parse_one
    -3345
    -3346        if isinstance(dtype, str):
    -3347            if dtype.upper() in cls.Type.__members__:
    -3348                data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type[dtype.upper()])
    -3349            else:
    -3350                data_type_exp = parse_one(dtype, read=dialect, into=DataType)
    -3351
    -3352            if data_type_exp is None:
    -3353                raise ValueError(f"Unparsable data type value: {dtype}")
    -3354        elif isinstance(dtype, DataType.Type):
    -3355            data_type_exp = DataType(this=dtype)
    -3356        elif isinstance(dtype, DataType):
    -3357            return dtype
    -3358        else:
    -3359            raise ValueError(f"Invalid data type: {type(dtype)}. Expected str or DataType.Type")
    -3360
    -3361        return DataType(**{**data_type_exp.args, **kwargs})
    +            
    3377    @classmethod
    +3378    def build(
    +3379        cls, dtype: str | DataType | DataType.Type, dialect: DialectType = None, **kwargs
    +3380    ) -> DataType:
    +3381        from sqlglot import parse_one
    +3382
    +3383        if isinstance(dtype, str):
    +3384            if dtype.upper() == "UNKNOWN":
    +3385                data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type.UNKNOWN)
    +3386            else:
    +3387                data_type_exp = parse_one(dtype, read=dialect, into=DataType)
    +3388
    +3389            if data_type_exp is None:
    +3390                raise ValueError(f"Unparsable data type value: {dtype}")
    +3391        elif isinstance(dtype, DataType.Type):
    +3392            data_type_exp = DataType(this=dtype)
    +3393        elif isinstance(dtype, DataType):
    +3394            return dtype
    +3395        else:
    +3396            raise ValueError(f"Invalid data type: {type(dtype)}. Expected str or DataType.Type")
    +3397
    +3398        return DataType(**{**data_type_exp.args, **kwargs})
     
    @@ -27638,8 +27966,8 @@ If another Expression instance is passed,
    -
    3363    def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool:
    -3364        return any(self.this == DataType.build(dtype).this for dtype in dtypes)
    +            
    3400    def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool:
    +3401        return any(self.this == DataType.build(dtype).this for dtype in dtypes)
     
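    `is_type` compares only the underlying `Type` member, ignoring sizes and nesting; a small sketch:

    >>> from sqlglot.expressions import DataType
    >>> DataType.build("varchar(16)").is_type("varchar", "char")
    True
    >>> DataType.build("bigint").is_type("int")
    False
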
    @@ -27704,82 +28032,84 @@ If another Expression instance is passed,
    -
    3230    class Type(AutoName):
    -3231        ARRAY = auto()
    -3232        BIGDECIMAL = auto()
    -3233        BIGINT = auto()
    -3234        BIGSERIAL = auto()
    -3235        BINARY = auto()
    -3236        BIT = auto()
    -3237        BOOLEAN = auto()
    -3238        CHAR = auto()
    -3239        DATE = auto()
    -3240        DATETIME = auto()
    -3241        DATETIME64 = auto()
    -3242        INT4RANGE = auto()
    -3243        INT4MULTIRANGE = auto()
    -3244        INT8RANGE = auto()
    -3245        INT8MULTIRANGE = auto()
    -3246        NUMRANGE = auto()
    -3247        NUMMULTIRANGE = auto()
    -3248        TSRANGE = auto()
    -3249        TSMULTIRANGE = auto()
    -3250        TSTZRANGE = auto()
    -3251        TSTZMULTIRANGE = auto()
    -3252        DATERANGE = auto()
    -3253        DATEMULTIRANGE = auto()
    -3254        DECIMAL = auto()
    -3255        DOUBLE = auto()
    -3256        FLOAT = auto()
    -3257        GEOGRAPHY = auto()
    -3258        GEOMETRY = auto()
    -3259        HLLSKETCH = auto()
    -3260        HSTORE = auto()
    -3261        IMAGE = auto()
    -3262        INET = auto()
    -3263        INT = auto()
    -3264        INT128 = auto()
    -3265        INT256 = auto()
    -3266        INTERVAL = auto()
    -3267        JSON = auto()
    -3268        JSONB = auto()
    -3269        LONGBLOB = auto()
    -3270        LONGTEXT = auto()
    -3271        MAP = auto()
    -3272        MEDIUMBLOB = auto()
    -3273        MEDIUMTEXT = auto()
    -3274        MONEY = auto()
    -3275        NCHAR = auto()
    -3276        NULL = auto()
    -3277        NULLABLE = auto()
    -3278        NVARCHAR = auto()
    -3279        OBJECT = auto()
    -3280        ROWVERSION = auto()
    -3281        SERIAL = auto()
    -3282        SMALLINT = auto()
    -3283        SMALLMONEY = auto()
    -3284        SMALLSERIAL = auto()
    -3285        STRUCT = auto()
    -3286        SUPER = auto()
    -3287        TEXT = auto()
    -3288        TIME = auto()
    -3289        TIMESTAMP = auto()
    -3290        TIMESTAMPTZ = auto()
    -3291        TIMESTAMPLTZ = auto()
    -3292        TINYINT = auto()
    -3293        UBIGINT = auto()
    -3294        UINT = auto()
    -3295        USMALLINT = auto()
    -3296        UTINYINT = auto()
    -3297        UNKNOWN = auto()  # Sentinel value, useful for type annotation
    -3298        UINT128 = auto()
    -3299        UINT256 = auto()
    -3300        UNIQUEIDENTIFIER = auto()
    -3301        UUID = auto()
    -3302        VARBINARY = auto()
    -3303        VARCHAR = auto()
    -3304        VARIANT = auto()
    -3305        XML = auto()
    +            
    3264    class Type(AutoName):
    +3265        ARRAY = auto()
    +3266        BIGDECIMAL = auto()
    +3267        BIGINT = auto()
    +3268        BIGSERIAL = auto()
    +3269        BINARY = auto()
    +3270        BIT = auto()
    +3271        BOOLEAN = auto()
    +3272        CHAR = auto()
    +3273        DATE = auto()
    +3274        DATETIME = auto()
    +3275        DATETIME64 = auto()
    +3276        ENUM = auto()
    +3277        INT4RANGE = auto()
    +3278        INT4MULTIRANGE = auto()
    +3279        INT8RANGE = auto()
    +3280        INT8MULTIRANGE = auto()
    +3281        NUMRANGE = auto()
    +3282        NUMMULTIRANGE = auto()
    +3283        TSRANGE = auto()
    +3284        TSMULTIRANGE = auto()
    +3285        TSTZRANGE = auto()
    +3286        TSTZMULTIRANGE = auto()
    +3287        DATERANGE = auto()
    +3288        DATEMULTIRANGE = auto()
    +3289        DECIMAL = auto()
    +3290        DOUBLE = auto()
    +3291        FLOAT = auto()
    +3292        GEOGRAPHY = auto()
    +3293        GEOMETRY = auto()
    +3294        HLLSKETCH = auto()
    +3295        HSTORE = auto()
    +3296        IMAGE = auto()
    +3297        INET = auto()
    +3298        INT = auto()
    +3299        INT128 = auto()
    +3300        INT256 = auto()
    +3301        INTERVAL = auto()
    +3302        JSON = auto()
    +3303        JSONB = auto()
    +3304        LONGBLOB = auto()
    +3305        LONGTEXT = auto()
    +3306        MAP = auto()
    +3307        MEDIUMBLOB = auto()
    +3308        MEDIUMTEXT = auto()
    +3309        MONEY = auto()
    +3310        NCHAR = auto()
    +3311        NULL = auto()
    +3312        NULLABLE = auto()
    +3313        NVARCHAR = auto()
    +3314        OBJECT = auto()
    +3315        ROWVERSION = auto()
    +3316        SERIAL = auto()
    +3317        SET = auto()
    +3318        SMALLINT = auto()
    +3319        SMALLMONEY = auto()
    +3320        SMALLSERIAL = auto()
    +3321        STRUCT = auto()
    +3322        SUPER = auto()
    +3323        TEXT = auto()
    +3324        TIME = auto()
    +3325        TIMESTAMP = auto()
    +3326        TIMESTAMPTZ = auto()
    +3327        TIMESTAMPLTZ = auto()
    +3328        TINYINT = auto()
    +3329        UBIGINT = auto()
    +3330        UINT = auto()
    +3331        USMALLINT = auto()
    +3332        UTINYINT = auto()
    +3333        UNKNOWN = auto()  # Sentinel value, useful for type annotation
    +3334        UINT128 = auto()
    +3335        UINT256 = auto()
    +3336        UNIQUEIDENTIFIER = auto()
    +3337        UUID = auto()
    +3338        VARBINARY = auto()
    +3339        VARCHAR = auto()
    +3340        VARIANT = auto()
    +3341        XML = auto()
     
    @@ -27918,6 +28248,18 @@ If another Expression instance is passed,
    + ENUM = <Type.ENUM: 'ENUM'>
    @@ -28398,6 +28740,18 @@ If another Expression instance is passed,
    + SET = <Type.SET: 'SET'>
    @@ -28709,8 +29063,8 @@ If another Expression instance is passed,
    -
    3368class PseudoType(Expression):
    -3369    pass
    +            
    3405class PseudoType(Expression):
    +3406    pass
     
    @@ -28774,8 +29128,8 @@ If another Expression instance is passed,
    -
    3373class SubqueryPredicate(Predicate):
    -3374    pass
    +            
    3410class SubqueryPredicate(Predicate):
    +3411    pass
     
    @@ -28854,8 +29208,8 @@ If another Expression instance is passed,
    -
    3377class All(SubqueryPredicate):
    -3378    pass
    +            
    3414class All(SubqueryPredicate):
    +3415    pass
     
    @@ -28934,8 +29288,8 @@ If another Expression instance is passed,
    -
    3381class Any(SubqueryPredicate):
    -3382    pass
    +            
    3418class Any(SubqueryPredicate):
    +3419    pass
     
    @@ -29014,8 +29368,8 @@ If another Expression instance is passed,
    -
    3385class Exists(SubqueryPredicate):
    -3386    pass
    +            
    3422class Exists(SubqueryPredicate):
    +3423    pass
     
    @@ -29094,8 +29448,8 @@ If another Expression instance is passed,
    -
    3391class Command(Expression):
    -3392    arg_types = {"this": True, "expression": False}
    +            
    3428class Command(Expression):
    +3429    arg_types = {"this": True, "expression": False}
     
    @@ -29159,8 +29513,8 @@ If another Expression instance is passed,
    -
    3395class Transaction(Expression):
    -3396    arg_types = {"this": False, "modes": False}
    +            
    3432class Transaction(Expression):
    +3433    arg_types = {"this": False, "modes": False}
     
    @@ -29224,8 +29578,8 @@ If another Expression instance is passed,
    -
    3399class Commit(Expression):
    -3400    arg_types = {"chain": False}
    +            
    3436class Commit(Expression):
    +3437    arg_types = {"chain": False}
     
    @@ -29289,8 +29643,8 @@ If another Expression instance is passed,
    -
    3403class Rollback(Expression):
    -3404    arg_types = {"savepoint": False}
    +            
    3440class Rollback(Expression):
    +3441    arg_types = {"savepoint": False}
     
    @@ -29354,8 +29708,8 @@ If another Expression instance is passed,
    -
    3407class AlterTable(Expression):
    -3408    arg_types = {"this": True, "actions": True, "exists": False}
    +            
    3444class AlterTable(Expression):
    +3445    arg_types = {"this": True, "actions": True, "exists": False}
     
    @@ -29419,8 +29773,8 @@ If another Expression instance is passed,
    -
    3411class AddConstraint(Expression):
    -3412    arg_types = {"this": False, "expression": False, "enforced": False}
    +            
    3448class AddConstraint(Expression):
    +3449    arg_types = {"this": False, "expression": False, "enforced": False}
     
    @@ -29484,8 +29838,8 @@ If another Expression instance is passed,
    -
    3415class DropPartition(Expression):
    -3416    arg_types = {"expressions": True, "exists": False}
    +            
    3452class DropPartition(Expression):
    +3453    arg_types = {"expressions": True, "exists": False}
     
    @@ -29549,16 +29903,16 @@ If another Expression instance is passed,
    -
    3420class Binary(Condition):
    -3421    arg_types = {"this": True, "expression": True}
    -3422
    -3423    @property
    -3424    def left(self):
    -3425        return self.this
    -3426
    -3427    @property
    -3428    def right(self):
    -3429        return self.expression
    +            
    3457class Binary(Condition):
    +3458    arg_types = {"this": True, "expression": True}
    +3459
    +3460    @property
    +3461    def left(self):
    +3462        return self.this
    +3463
    +3464    @property
    +3465    def right(self):
    +3466        return self.expression
     
    @@ -29637,8 +29991,8 @@ If another Expression instance is passed,
    -
    3432class Add(Binary):
    -3433    pass
    +            
    3469class Add(Binary):
    +3470    pass
     
    @@ -29717,8 +30071,8 @@ If another Expression instance is passed,
    -
    3436class Connector(Binary):
    -3437    pass
    +            
    3473class Connector(Binary):
    +3474    pass
     
    @@ -29797,8 +30151,8 @@ If another Expression instance is passed,
    -
    3440class And(Connector):
    -3441    pass
    +            
    3477class And(Connector):
    +3478    pass
     
    @@ -29877,8 +30231,8 @@ If another Expression instance is passed,
    -
    3444class Or(Connector):
    -3445    pass
    +            
    3481class Or(Connector):
    +3482    pass
     
    @@ -29957,8 +30311,8 @@ If another Expression instance is passed,
    -
    3448class BitwiseAnd(Binary):
    -3449    pass
    +            
    3485class BitwiseAnd(Binary):
    +3486    pass
     
    @@ -30037,8 +30391,8 @@ If another Expression instance is passed,
    -
    3452class BitwiseLeftShift(Binary):
    -3453    pass
    +            
    3489class BitwiseLeftShift(Binary):
    +3490    pass
     
    @@ -30117,8 +30471,8 @@ If another Expression instance is passed,
    -
    3456class BitwiseOr(Binary):
    -3457    pass
    +            
    3493class BitwiseOr(Binary):
    +3494    pass
     
    @@ -30197,8 +30551,8 @@ If another Expression instance is passed,
    -
    3460class BitwiseRightShift(Binary):
    -3461    pass
    +            
    3497class BitwiseRightShift(Binary):
    +3498    pass
     
    @@ -30277,8 +30631,8 @@ If another Expression instance is passed,
    -
    3464class BitwiseXor(Binary):
    -3465    pass
    +            
    3501class BitwiseXor(Binary):
    +3502    pass
     
    @@ -30357,8 +30711,8 @@ If another Expression instance is passed,
    -
    3468class Div(Binary):
    -3469    pass
    +            
    3505class Div(Binary):
    +3506    pass
     
    @@ -30437,8 +30791,8 @@ If another Expression instance is passed,
    -
    3472class Overlaps(Binary):
    -3473    pass
    +            
    3509class Overlaps(Binary):
    +3510    pass
     
    @@ -30517,29 +30871,63 @@ If another Expression instance is passed,
    -
    3476class Dot(Binary):
    -3477    @property
    -3478    def name(self) -> str:
    -3479        return self.expression.name
    -3480
    -3481    @classmethod
    -3482    def build(self, expressions: t.Sequence[Expression]) -> Dot:
    -3483        """Build a Dot object with a sequence of expressions."""
    -3484        if len(expressions) < 2:
    -3485            raise ValueError(f"Dot requires >= 2 expressions.")
    -3486
    -3487        a, b, *expressions = expressions
    -3488        dot = Dot(this=a, expression=b)
    -3489
    -3490        for expression in expressions:
    -3491            dot = Dot(this=dot, expression=expression)
    -3492
    -3493        return dot
    -
    +            
    3513class Dot(Binary):
    +3514    @property
    +3515    def name(self) -> str:
    +3516        return self.expression.name
    +3517
    +3518    @property
    +3519    def output_name(self) -> str:
    +3520        return self.name
    +3521
    +3522    @classmethod
    +3523    def build(self, expressions: t.Sequence[Expression]) -> Dot:
    +3524        """Build a Dot object with a sequence of expressions."""
    +3525        if len(expressions) < 2:
    +3526            raise ValueError(f"Dot requires >= 2 expressions.")
    +3527
    +3528        a, b, *expressions = expressions
    +3529        dot = Dot(this=a, expression=b)
    +3530
    +3531        for expression in expressions:
    +3532            dot = Dot(this=dot, expression=expression)
    +3533
    +3534        return dot
    +
    +
    + output_name: str

    Name of the output column if this expression is a selection.

    If the Expression has no output name, an empty string is returned.

    Example:

    >>> from sqlglot import parse_one
    +>>> parse_one("SELECT a").expressions[0].output_name
    +'a'
    +>>> parse_one("SELECT b AS c").expressions[0].output_name
    +'c'
    +>>> parse_one("SELECT 1 + 2").expressions[0].output_name
    +''
    +
    @@ -30552,19 +30940,19 @@ If another Expression instance is passed,
    -
    3481    @classmethod
    -3482    def build(self, expressions: t.Sequence[Expression]) -> Dot:
    -3483        """Build a Dot object with a sequence of expressions."""
    -3484        if len(expressions) < 2:
    -3485            raise ValueError(f"Dot requires >= 2 expressions.")
    -3486
    -3487        a, b, *expressions = expressions
    -3488        dot = Dot(this=a, expression=b)
    -3489
    -3490        for expression in expressions:
    -3491            dot = Dot(this=dot, expression=expression)
    -3492
    -3493        return dot
    +            
    3522    @classmethod
    +3523    def build(self, expressions: t.Sequence[Expression]) -> Dot:
    +3524        """Build a Dot object with a sequence of expressions."""
    +3525        if len(expressions) < 2:
    +3526            raise ValueError(f"Dot requires >= 2 expressions.")
    +3527
    +3528        a, b, *expressions = expressions
    +3529        dot = Dot(this=a, expression=b)
    +3530
    +3531        for expression in expressions:
    +3532            dot = Dot(this=dot, expression=expression)
    +3533
    +3534        return dot
     
    @@ -30587,7 +30975,6 @@ If another Expression instance is passed,
    is_int
    is_star
    alias
    -
    output_name
    copy
    add_comments
    append
    @@ -30646,8 +31033,8 @@ If another Expression instance is passed,
    -
    3496class DPipe(Binary):
    -3497    pass
    +            
    3537class DPipe(Binary):
    +3538    pass
     
    @@ -30711,6 +31098,86 @@ If another Expression instance is passed,
    neq
    rlike
    +
    + class SafeDPipe(DPipe):
    +
    3541class SafeDPipe(DPipe):
    +3542    pass
    +
    @@ -30726,8 +31193,8 @@ If another Expression instance is passed,
    -
    3500class EQ(Binary, Predicate):
    -3501    pass
    +            
    3545class EQ(Binary, Predicate):
    +3546    pass
     
    @@ -30806,8 +31273,8 @@ If another Expression instance is passed,
    -
    3504class NullSafeEQ(Binary, Predicate):
    -3505    pass
    +            
    3549class NullSafeEQ(Binary, Predicate):
    +3550    pass
     
    @@ -30886,8 +31353,8 @@ If another Expression instance is passed,
    -
    3508class NullSafeNEQ(Binary, Predicate):
    -3509    pass
    +            
    3553class NullSafeNEQ(Binary, Predicate):
    +3554    pass
     
    @@ -30966,8 +31433,8 @@ If another Expression instance is passed,
    -
    3512class Distance(Binary):
    -3513    pass
    +            
    3557class Distance(Binary):
    +3558    pass
     
    @@ -31046,8 +31513,8 @@ If another Expression instance is passed,
    -
    3516class Escape(Binary):
    -3517    pass
    +            
    3561class Escape(Binary):
    +3562    pass
     
    @@ -31126,8 +31593,8 @@ If another Expression instance is passed,
    -
    3520class Glob(Binary, Predicate):
    -3521    pass
    +            
    3565class Glob(Binary, Predicate):
    +3566    pass
     
    @@ -31206,8 +31673,8 @@ If another Expression instance is passed,
    -
    3524class GT(Binary, Predicate):
    -3525    pass
    +            
    3569class GT(Binary, Predicate):
    +3570    pass
     
    @@ -31286,8 +31753,8 @@ If another Expression instance is passed,
    -
    3528class GTE(Binary, Predicate):
    -3529    pass
    +            
    3573class GTE(Binary, Predicate):
    +3574    pass
     
    @@ -31366,8 +31833,8 @@ If another Expression instance is passed,
    -
    3532class ILike(Binary, Predicate):
    -3533    pass
    +            
    3577class ILike(Binary, Predicate):
    +3578    pass
     
    @@ -31446,8 +31913,8 @@ If another Expression instance is passed,
    -
    3536class ILikeAny(Binary, Predicate):
    -3537    pass
    +            
    3581class ILikeAny(Binary, Predicate):
    +3582    pass
     
    @@ -31526,8 +31993,8 @@ If another Expression instance is passed,
    -
    3540class IntDiv(Binary):
    -3541    pass
    +            
    3585class IntDiv(Binary):
    +3586    pass
     
    @@ -31606,8 +32073,8 @@ If another Expression instance is passed,
    -
    3544class Is(Binary, Predicate):
    -3545    pass
    +            
    3589class Is(Binary, Predicate):
    +3590    pass
     
    @@ -31686,8 +32153,8 @@ If another Expression instance is passed,
    -
    3548class Kwarg(Binary):
    -3549    """Kwarg in special functions like func(kwarg => y)."""
    +            
    3593class Kwarg(Binary):
    +3594    """Kwarg in special functions like func(kwarg => y)."""
     
    @@ -31768,8 +32235,8 @@ If another Expression instance is passed,
    -
    3552class Like(Binary, Predicate):
    -3553    pass
    +            
    3597class Like(Binary, Predicate):
    +3598    pass
     
    @@ -31848,8 +32315,8 @@ If another Expression instance is passed,
    -
    3556class LikeAny(Binary, Predicate):
    -3557    pass
    +            
    3601class LikeAny(Binary, Predicate):
    +3602    pass
     
    @@ -31928,8 +32395,8 @@ If another Expression instance is passed,
    -
    3560class LT(Binary, Predicate):
    -3561    pass
    +            
    3605class LT(Binary, Predicate):
    +3606    pass
     
    @@ -32008,8 +32475,8 @@ If another Expression instance is passed,
    -
    3564class LTE(Binary, Predicate):
    -3565    pass
    +            
    3609class LTE(Binary, Predicate):
    +3610    pass
     
    @@ -32088,8 +32555,8 @@ If another Expression instance is passed,
    -
    3568class Mod(Binary):
    -3569    pass
    +            
    3613class Mod(Binary):
    +3614    pass
     
    @@ -32168,8 +32635,8 @@ If another Expression instance is passed,
    -
    3572class Mul(Binary):
    -3573    pass
    +            
    3617class Mul(Binary):
    +3618    pass
     
    @@ -32248,8 +32715,8 @@ If another Expression instance is passed,
    -
    3576class NEQ(Binary, Predicate):
    -3577    pass
    +            
    3621class NEQ(Binary, Predicate):
    +3622    pass
     
    @@ -32328,8 +32795,8 @@ If another Expression instance is passed,
    -
    3580class SimilarTo(Binary, Predicate):
    -3581    pass
    +            
    3625class SimilarTo(Binary, Predicate):
    +3626    pass
     
    @@ -32408,8 +32875,8 @@ If another Expression instance is passed,
    -
    3584class Slice(Binary):
    -3585    arg_types = {"this": False, "expression": False}
    +            
    3629class Slice(Binary):
    +3630    arg_types = {"this": False, "expression": False}
     
    @@ -32488,8 +32955,8 @@ If another Expression instance is passed,
    -
    3588class Sub(Binary):
    -3589    pass
    +            
    3633class Sub(Binary):
    +3634    pass
     
    @@ -32568,8 +33035,8 @@ If another Expression instance is passed,
    -
    3592class ArrayOverlaps(Binary):
    -3593    pass
    +            
    3637class ArrayOverlaps(Binary):
    +3638    pass
     
    @@ -32648,8 +33115,8 @@ If another Expression instance is passed,
    -
    3598class Unary(Condition):
    -3599    pass
    +            
    3643class Unary(Condition):
    +3644    pass
     
    @@ -32728,8 +33195,8 @@ If another Expression instance is passed,
    -
    3602class BitwiseNot(Unary):
    -3603    pass
    +            
    3647class BitwiseNot(Unary):
    +3648    pass
     
    @@ -32808,8 +33275,8 @@ If another Expression instance is passed,
    -
    3606class Not(Unary):
    -3607    pass
    +            
    3651class Not(Unary):
    +3652    pass
     
    @@ -32888,13 +33355,47 @@ If another Expression instance is passed,
    -
    3610class Paren(Unary):
    -3611    arg_types = {"this": True, "with": False}
    +            
    3655class Paren(Unary):
    +3656    arg_types = {"this": True, "with": False}
    +3657
    +3658    @property
    +3659    def output_name(self) -> str:
    +3660        return self.this.name
     
    +
    + output_name: str

    Name of the output column if this expression is a selection.

    If the Expression has no output name, an empty string is returned.

    Example:

    >>> from sqlglot import parse_one
    +>>> parse_one("SELECT a").expressions[0].output_name
    +'a'
    +>>> parse_one("SELECT b AS c").expressions[0].output_name
    +'c'
    +>>> parse_one("SELECT 1 + 2").expressions[0].output_name
    +''
    +
    Inherited Members
    @@ -32909,7 +33410,6 @@ If another Expression instance is passed,
    is_int
    is_star
    alias
    -
    output_name
    copy
    add_comments
    append
    @@ -32968,8 +33468,8 @@ If another Expression instance is passed,
    -
    3614class Neg(Unary):
    -3615    pass
    +            
    3663class Neg(Unary):
    +3664    pass
     
    @@ -33048,12 +33548,12 @@ If another Expression instance is passed,
    -
    3618class Alias(Expression):
    -3619    arg_types = {"this": True, "alias": False}
    -3620
    -3621    @property
    -3622    def output_name(self) -> str:
    -3623        return self.alias
    +            
    3667class Alias(Expression):
    +3668    arg_types = {"this": True, "alias": False}
    +3669
    +3670    @property
    +3671    def output_name(self) -> str:
    +3672        return self.alias
     
    @@ -33146,12 +33646,12 @@ If another Expression instance is passed,
    -
    3626class Aliases(Expression):
    -3627    arg_types = {"this": True, "expressions": True}
    -3628
    -3629    @property
    -3630    def aliases(self):
    -3631        return self.expressions
    +            
    3675class Aliases(Expression):
    +3676    arg_types = {"this": True, "expressions": True}
    +3677
    +3678    @property
    +3679    def aliases(self):
    +3680        return self.expressions
     
    @@ -33215,8 +33715,8 @@ If another Expression instance is passed,
    -
    3634class AtTimeZone(Expression):
    -3635    arg_types = {"this": True, "zone": True}
    +            
    3683class AtTimeZone(Expression):
    +3684    arg_types = {"this": True, "zone": True}
     
    @@ -33280,8 +33780,8 @@ If another Expression instance is passed,
    -
    3638class Between(Predicate):
    -3639    arg_types = {"this": True, "low": True, "high": True}
    +            
    3687class Between(Predicate):
    +3688    arg_types = {"this": True, "low": True, "high": True}
     
    @@ -33360,8 +33860,8 @@ If another Expression instance is passed,
    -
    3642class Bracket(Condition):
    -3643    arg_types = {"this": True, "expressions": True}
    +            
    3691class Bracket(Condition):
    +3692    arg_types = {"this": True, "expressions": True}
     
    @@ -33440,8 +33940,8 @@ If another Expression instance is passed,
    -
    3646class Distinct(Expression):
    -3647    arg_types = {"expressions": False, "on": False}
    +            
    3695class Distinct(Expression):
    +3696    arg_types = {"expressions": False, "on": False}
     
    @@ -33505,15 +34005,15 @@ If another Expression instance is passed,
    -
    3650class In(Predicate):
    -3651    arg_types = {
    -3652        "this": True,
    -3653        "expressions": False,
    -3654        "query": False,
    -3655        "unnest": False,
    -3656        "field": False,
    -3657        "is_global": False,
    -3658    }
    +            
    3699class In(Predicate):
    +3700    arg_types = {
    +3701        "this": True,
    +3702        "expressions": False,
    +3703        "query": False,
    +3704        "unnest": False,
    +3705        "field": False,
    +3706        "is_global": False,
    +3707    }
     
    @@ -33592,19 +34092,19 @@ If another Expression instance is passed,
    -
    3661class TimeUnit(Expression):
    -3662    """Automatically converts unit arg into a var."""
    -3663
    -3664    arg_types = {"unit": False}
    -3665
    -3666    def __init__(self, **args):
    -3667        unit = args.get("unit")
    -3668        if isinstance(unit, (Column, Literal)):
    -3669            args["unit"] = Var(this=unit.name)
    -3670        elif isinstance(unit, Week):
    -3671            unit.set("this", Var(this=unit.this.name))
    -3672
    -3673        super().__init__(**args)
    +            
    3710class TimeUnit(Expression):
    +3711    """Automatically converts unit arg into a var."""
    +3712
    +3713    arg_types = {"unit": False}
    +3714
    +3715    def __init__(self, **args):
    +3716        unit = args.get("unit")
    +3717        if isinstance(unit, (Column, Literal)):
    +3718            args["unit"] = Var(this=unit.name)
    +3719        elif isinstance(unit, Week):
    +3720            unit.set("this", Var(this=unit.this.name))
    +3721
    +3722        super().__init__(**args)
     
    @@ -33622,14 +34122,14 @@ If another Expression instance is passed,
    -
    3666    def __init__(self, **args):
    -3667        unit = args.get("unit")
    -3668        if isinstance(unit, (Column, Literal)):
    -3669            args["unit"] = Var(this=unit.name)
    -3670        elif isinstance(unit, Week):
    -3671            unit.set("this", Var(this=unit.this.name))
    -3672
    -3673        super().__init__(**args)
    +            
    3715    def __init__(self, **args):
    +3716        unit = args.get("unit")
    +3717        if isinstance(unit, (Column, Literal)):
    +3718            args["unit"] = Var(this=unit.name)
    +3719        elif isinstance(unit, Week):
    +3720            unit.set("this", Var(this=unit.this.name))
    +3721
    +3722        super().__init__(**args)
     
    @@ -33693,12 +34193,12 @@ If another Expression instance is passed,
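    Because `__init__` rewrites a column or literal `unit` into a `Var`, the unit is emitted bare rather than quoted. A hedged sketch using `DateAdd`, which subclasses `TimeUnit` in this codebase (the default generator's function fallback rendering is assumed):

    >>> from sqlglot.expressions import DateAdd, Literal, column
    >>> DateAdd(this=column("x"), expression=Literal.number(1), unit=column("DAY")).sql()
    'DATE_ADD(x, 1, DAY)'
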
    -
    3676class Interval(TimeUnit):
    -3677    arg_types = {"this": False, "unit": False}
    -3678
    -3679    @property
    -3680    def unit(self) -> t.Optional[Var]:
    -3681        return self.args.get("unit")
    +            
    3725class Interval(TimeUnit):
    +3726    arg_types = {"this": False, "unit": False}
    +3727
    +3728    @property
    +3729    def unit(self) -> t.Optional[Var]:
    +3730        return self.args.get("unit")
     
    @@ -33765,8 +34265,8 @@ If another Expression instance is passed,
    -
    3684class IgnoreNulls(Expression):
    -3685    pass
    +            
    3733class IgnoreNulls(Expression):
    +3734    pass
     
    @@ -33830,8 +34330,8 @@ If another Expression instance is passed,
    -
    3688class RespectNulls(Expression):
    -3689    pass
    +            
    3737class RespectNulls(Expression):
    +3738    pass
     
    @@ -33895,53 +34395,53 @@ If another Expression instance is passed,
    -
    3693class Func(Condition):
    -3694    """
    -3695    The base class for all function expressions.
    -3696
    -3697    Attributes:
    -3698        is_var_len_args (bool): if set to True the last argument defined in arg_types will be
    -3699            treated as a variable length argument and the argument's value will be stored as a list.
    -3700        _sql_names (list): determines the SQL name (1st item in the list) and aliases (subsequent items)
    -3701            for this function expression. These values are used to map this node to a name during parsing
    -3702            as well as to provide the function's name during SQL string generation. By default the SQL
    -3703            name is set to the expression's class name transformed to snake case.
    -3704    """
    -3705
    -3706    is_var_len_args = False
    -3707
    -3708    @classmethod
    -3709    def from_arg_list(cls, args):
    -3710        if cls.is_var_len_args:
    -3711            all_arg_keys = list(cls.arg_types)
    -3712            # If this function supports variable length argument treat the last argument as such.
    -3713            non_var_len_arg_keys = all_arg_keys[:-1] if cls.is_var_len_args else all_arg_keys
    -3714            num_non_var = len(non_var_len_arg_keys)
    -3715
    -3716            args_dict = {arg_key: arg for arg, arg_key in zip(args, non_var_len_arg_keys)}
    -3717            args_dict[all_arg_keys[-1]] = args[num_non_var:]
    -3718        else:
    -3719            args_dict = {arg_key: arg for arg, arg_key in zip(args, cls.arg_types)}
    -3720
    -3721        return cls(**args_dict)
    -3722
    -3723    @classmethod
    -3724    def sql_names(cls):
    -3725        if cls is Func:
    -3726            raise NotImplementedError(
    -3727                "SQL name is only supported by concrete function implementations"
    -3728            )
    -3729        if "_sql_names" not in cls.__dict__:
    -3730            cls._sql_names = [camel_to_snake_case(cls.__name__)]
    -3731        return cls._sql_names
    -3732
    -3733    @classmethod
    -3734    def sql_name(cls):
    -3735        return cls.sql_names()[0]
    -3736
    -3737    @classmethod
    -3738    def default_parser_mappings(cls):
    -3739        return {name: cls.from_arg_list for name in cls.sql_names()}
    +            
    3742class Func(Condition):
    +3743    """
    +3744    The base class for all function expressions.
    +3745
    +3746    Attributes:
    +3747        is_var_len_args (bool): if set to True the last argument defined in arg_types will be
    +3748            treated as a variable length argument and the argument's value will be stored as a list.
    +3749        _sql_names (list): determines the SQL name (1st item in the list) and aliases (subsequent items)
    +3750            for this function expression. These values are used to map this node to a name during parsing
    +3751            as well as to provide the function's name during SQL string generation. By default the SQL
    +3752            name is set to the expression's class name transformed to snake case.
    +3753    """
    +3754
    +3755    is_var_len_args = False
    +3756
    +3757    @classmethod
    +3758    def from_arg_list(cls, args):
    +3759        if cls.is_var_len_args:
    +3760            all_arg_keys = list(cls.arg_types)
    +3761            # If this function supports variable length argument treat the last argument as such.
    +3762            non_var_len_arg_keys = all_arg_keys[:-1] if cls.is_var_len_args else all_arg_keys
    +3763            num_non_var = len(non_var_len_arg_keys)
    +3764
    +3765            args_dict = {arg_key: arg for arg, arg_key in zip(args, non_var_len_arg_keys)}
    +3766            args_dict[all_arg_keys[-1]] = args[num_non_var:]
    +3767        else:
    +3768            args_dict = {arg_key: arg for arg, arg_key in zip(args, cls.arg_types)}
    +3769
    +3770        return cls(**args_dict)
    +3771
    +3772    @classmethod
    +3773    def sql_names(cls):
    +3774        if cls is Func:
    +3775            raise NotImplementedError(
    +3776                "SQL name is only supported by concrete function implementations"
    +3777            )
    +3778        if "_sql_names" not in cls.__dict__:
    +3779            cls._sql_names = [camel_to_snake_case(cls.__name__)]
    +3780        return cls._sql_names
    +3781
    +3782    @classmethod
    +3783    def sql_name(cls):
    +3784        return cls.sql_names()[0]
    +3785
    +3786    @classmethod
    +3787    def default_parser_mappings(cls):
    +3788        return {name: cls.from_arg_list for name in cls.sql_names()}
     
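For reference, a minimal sketch (not part of this patch) of the two class methods documented above, using concrete subclasses that appear later in this file; the expected outputs in the comments assume the default dialect.

from sqlglot import expressions as exp

# Non-variadic: positional args map one-to-one onto the arg_types keys.
approx = exp.ApproxDistinct.from_arg_list([exp.column("x")])
print(approx.sql())  # APPROX_DISTINCT(x)

# Variadic (is_var_len_args = True): trailing args are collected into a list
# under the last arg_types key ("expressions").
arr = exp.Array.from_arg_list([exp.Literal.number(1), exp.Literal.number(2)])
print(arr.sql())  # e.g. ARRAY(1, 2)

# sql_names() falls back to the snake-cased class name unless _sql_names is set.
print(exp.ApproxDistinct.sql_names())  # ['APPROX_DISTINCT', 'APPROX_COUNT_DISTINCT']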
    @@ -33972,20 +34472,20 @@ name is set to the expression's class name transformed to snake case.
    -
    3708    @classmethod
    -3709    def from_arg_list(cls, args):
    -3710        if cls.is_var_len_args:
    -3711            all_arg_keys = list(cls.arg_types)
    -3712            # If this function supports variable length argument treat the last argument as such.
    -3713            non_var_len_arg_keys = all_arg_keys[:-1] if cls.is_var_len_args else all_arg_keys
    -3714            num_non_var = len(non_var_len_arg_keys)
    -3715
    -3716            args_dict = {arg_key: arg for arg, arg_key in zip(args, non_var_len_arg_keys)}
    -3717            args_dict[all_arg_keys[-1]] = args[num_non_var:]
    -3718        else:
    -3719            args_dict = {arg_key: arg for arg, arg_key in zip(args, cls.arg_types)}
    -3720
    -3721        return cls(**args_dict)
    +            
    3757    @classmethod
    +3758    def from_arg_list(cls, args):
    +3759        if cls.is_var_len_args:
    +3760            all_arg_keys = list(cls.arg_types)
    +3761            # If this function supports variable length argument treat the last argument as such.
    +3762            non_var_len_arg_keys = all_arg_keys[:-1] if cls.is_var_len_args else all_arg_keys
    +3763            num_non_var = len(non_var_len_arg_keys)
    +3764
    +3765            args_dict = {arg_key: arg for arg, arg_key in zip(args, non_var_len_arg_keys)}
    +3766            args_dict[all_arg_keys[-1]] = args[num_non_var:]
    +3767        else:
    +3768            args_dict = {arg_key: arg for arg, arg_key in zip(args, cls.arg_types)}
    +3769
    +3770        return cls(**args_dict)
     
    @@ -34004,15 +34504,15 @@ name is set to the expression's class name transformed to snake case.
    -
    3723    @classmethod
    -3724    def sql_names(cls):
    -3725        if cls is Func:
    -3726            raise NotImplementedError(
    -3727                "SQL name is only supported by concrete function implementations"
    -3728            )
    -3729        if "_sql_names" not in cls.__dict__:
    -3730            cls._sql_names = [camel_to_snake_case(cls.__name__)]
    -3731        return cls._sql_names
    +            
    3772    @classmethod
    +3773    def sql_names(cls):
    +3774        if cls is Func:
    +3775            raise NotImplementedError(
    +3776                "SQL name is only supported by concrete function implementations"
    +3777            )
    +3778        if "_sql_names" not in cls.__dict__:
    +3779            cls._sql_names = [camel_to_snake_case(cls.__name__)]
    +3780        return cls._sql_names
     
    @@ -34031,9 +34531,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3733    @classmethod
    -3734    def sql_name(cls):
    -3735        return cls.sql_names()[0]
    +            
    3782    @classmethod
    +3783    def sql_name(cls):
    +3784        return cls.sql_names()[0]
     
    @@ -34052,9 +34552,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3737    @classmethod
    -3738    def default_parser_mappings(cls):
    -3739        return {name: cls.from_arg_list for name in cls.sql_names()}
    +            
    3786    @classmethod
    +3787    def default_parser_mappings(cls):
    +3788        return {name: cls.from_arg_list for name in cls.sql_names()}
     
    @@ -34134,8 +34634,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3742class AggFunc(Func):
    -3743    pass
    +            
    3791class AggFunc(Func):
    +3792    pass
     
    @@ -34221,8 +34721,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3746class ParameterizedAgg(AggFunc):
    -3747    arg_types = {"this": True, "expressions": True, "params": True}
    +            
    3795class ParameterizedAgg(AggFunc):
    +3796    arg_types = {"this": True, "expressions": True, "params": True}
     
    @@ -34308,8 +34808,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3750class Abs(Func):
    -3751    pass
    +            
    3799class Abs(Func):
    +3800    pass
     
    @@ -34395,9 +34895,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3754class Anonymous(Func):
    -3755    arg_types = {"this": True, "expressions": False}
    -3756    is_var_len_args = True
    +            
    3803class Anonymous(Func):
    +3804    arg_types = {"this": True, "expressions": False}
    +3805    is_var_len_args = True
     
    @@ -34483,9 +34983,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3761class Hll(AggFunc):
    -3762    arg_types = {"this": True, "expressions": False}
    -3763    is_var_len_args = True
    +            
    3810class Hll(AggFunc):
    +3811    arg_types = {"this": True, "expressions": False}
    +3812    is_var_len_args = True
     
    @@ -34571,9 +35071,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3766class ApproxDistinct(AggFunc):
    -3767    arg_types = {"this": True, "accuracy": False}
    -3768    _sql_names = ["APPROX_DISTINCT", "APPROX_COUNT_DISTINCT"]
    +            
    3815class ApproxDistinct(AggFunc):
    +3816    arg_types = {"this": True, "accuracy": False}
    +3817    _sql_names = ["APPROX_DISTINCT", "APPROX_COUNT_DISTINCT"]
     
    @@ -34659,9 +35159,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3771class Array(Func):
    -3772    arg_types = {"expressions": False}
    -3773    is_var_len_args = True
    +            
    3820class Array(Func):
    +3821    arg_types = {"expressions": False}
    +3822    is_var_len_args = True
     
    @@ -34747,8 +35247,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3777class ToChar(Func):
    -3778    arg_types = {"this": True, "format": False}
    +            
    3826class ToChar(Func):
    +3827    arg_types = {"this": True, "format": False}
     
    @@ -34834,8 +35334,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3781class GenerateSeries(Func):
    -3782    arg_types = {"start": True, "end": True, "step": False}
    +            
    3830class GenerateSeries(Func):
    +3831    arg_types = {"start": True, "end": True, "step": False}
     
    @@ -34921,8 +35421,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3785class ArrayAgg(AggFunc):
    -3786    pass
    +            
    3834class ArrayAgg(AggFunc):
    +3835    pass
     
    @@ -35008,8 +35508,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3789class ArrayAll(Func):
    -3790    arg_types = {"this": True, "expression": True}
    +            
    3838class ArrayAll(Func):
    +3839    arg_types = {"this": True, "expression": True}
     
    @@ -35095,8 +35595,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3793class ArrayAny(Func):
    -3794    arg_types = {"this": True, "expression": True}
    +            
    3842class ArrayAny(Func):
    +3843    arg_types = {"this": True, "expression": True}
     
    @@ -35182,9 +35682,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3797class ArrayConcat(Func):
    -3798    arg_types = {"this": True, "expressions": False}
    -3799    is_var_len_args = True
    +            
    3846class ArrayConcat(Func):
    +3847    arg_types = {"this": True, "expressions": False}
    +3848    is_var_len_args = True
     
    @@ -35270,8 +35770,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3802class ArrayContains(Binary, Func):
    -3803    pass
    +            
    3851class ArrayContains(Binary, Func):
    +3852    pass
     
    @@ -35357,8 +35857,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3806class ArrayContained(Binary):
    -3807    pass
    +            
    3855class ArrayContained(Binary):
    +3856    pass
     
    @@ -35437,9 +35937,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3810class ArrayFilter(Func):
    -3811    arg_types = {"this": True, "expression": True}
    -3812    _sql_names = ["FILTER", "ARRAY_FILTER"]
    +            
    3859class ArrayFilter(Func):
    +3860    arg_types = {"this": True, "expression": True}
    +3861    _sql_names = ["FILTER", "ARRAY_FILTER"]
     
    @@ -35525,8 +36025,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3815class ArrayJoin(Func):
    -3816    arg_types = {"this": True, "expression": True, "null": False}
    +            
    3864class ArrayJoin(Func):
    +3865    arg_types = {"this": True, "expression": True, "null": False}
     
    @@ -35612,8 +36112,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3819class ArraySize(Func):
    -3820    arg_types = {"this": True, "expression": False}
    +            
    3868class ArraySize(Func):
    +3869    arg_types = {"this": True, "expression": False}
     
    @@ -35699,8 +36199,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3823class ArraySort(Func):
    -3824    arg_types = {"this": True, "expression": False}
    +            
    3872class ArraySort(Func):
    +3873    arg_types = {"this": True, "expression": False}
     
    @@ -35786,8 +36286,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3827class ArraySum(Func):
    -3828    pass
    +            
    3876class ArraySum(Func):
    +3877    pass
     
    @@ -35873,8 +36373,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3831class ArrayUnionAgg(AggFunc):
    -3832    pass
    +            
    3880class ArrayUnionAgg(AggFunc):
    +3881    pass
     
    @@ -35960,8 +36460,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3835class Avg(AggFunc):
    -3836    pass
    +            
    3884class Avg(AggFunc):
    +3885    pass
     
    @@ -36047,8 +36547,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3839class AnyValue(AggFunc):
    -3840    pass
    +            
    3888class AnyValue(AggFunc):
    +3889    pass
     
    @@ -36134,24 +36634,24 @@ name is set to the expression's class name transformed to snake case.
    -
    3843class Case(Func):
    -3844    arg_types = {"this": False, "ifs": True, "default": False}
    -3845
    -3846    def when(self, condition: ExpOrStr, then: ExpOrStr, copy: bool = True, **opts) -> Case:
    -3847        instance = _maybe_copy(self, copy)
    -3848        instance.append(
    -3849            "ifs",
    -3850            If(
    -3851                this=maybe_parse(condition, copy=copy, **opts),
    -3852                true=maybe_parse(then, copy=copy, **opts),
    -3853            ),
    -3854        )
    -3855        return instance
    -3856
    -3857    def else_(self, condition: ExpOrStr, copy: bool = True, **opts) -> Case:
    -3858        instance = _maybe_copy(self, copy)
    -3859        instance.set("default", maybe_parse(condition, copy=copy, **opts))
    -3860        return instance
    +            
    3892class Case(Func):
    +3893    arg_types = {"this": False, "ifs": True, "default": False}
    +3894
    +3895    def when(self, condition: ExpOrStr, then: ExpOrStr, copy: bool = True, **opts) -> Case:
    +3896        instance = _maybe_copy(self, copy)
    +3897        instance.append(
    +3898            "ifs",
    +3899            If(
    +3900                this=maybe_parse(condition, copy=copy, **opts),
    +3901                true=maybe_parse(then, copy=copy, **opts),
    +3902            ),
    +3903        )
    +3904        return instance
    +3905
    +3906    def else_(self, condition: ExpOrStr, copy: bool = True, **opts) -> Case:
    +3907        instance = _maybe_copy(self, copy)
    +3908        instance.set("default", maybe_parse(condition, copy=copy, **opts))
    +3909        return instance
     
    @@ -36168,16 +36668,16 @@ name is set to the expression's class name transformed to snake case.
    -
    3846    def when(self, condition: ExpOrStr, then: ExpOrStr, copy: bool = True, **opts) -> Case:
    -3847        instance = _maybe_copy(self, copy)
    -3848        instance.append(
    -3849            "ifs",
    -3850            If(
    -3851                this=maybe_parse(condition, copy=copy, **opts),
    -3852                true=maybe_parse(then, copy=copy, **opts),
    -3853            ),
    -3854        )
    -3855        return instance
    +            
    3895    def when(self, condition: ExpOrStr, then: ExpOrStr, copy: bool = True, **opts) -> Case:
    +3896        instance = _maybe_copy(self, copy)
    +3897        instance.append(
    +3898            "ifs",
    +3899            If(
    +3900                this=maybe_parse(condition, copy=copy, **opts),
    +3901                true=maybe_parse(then, copy=copy, **opts),
    +3902            ),
    +3903        )
    +3904        return instance
     
    @@ -36195,10 +36695,10 @@ name is set to the expression's class name transformed to snake case.
    -
    3857    def else_(self, condition: ExpOrStr, copy: bool = True, **opts) -> Case:
    -3858        instance = _maybe_copy(self, copy)
    -3859        instance.set("default", maybe_parse(condition, copy=copy, **opts))
    -3860        return instance
    +            
    3906    def else_(self, condition: ExpOrStr, copy: bool = True, **opts) -> Case:
    +3907        instance = _maybe_copy(self, copy)
    +3908        instance.set("default", maybe_parse(condition, copy=copy, **opts))
    +3909        return instance
     
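As a usage note (not part of this patch), the when()/else_() helpers above allow fluent construction of a CASE expression; string arguments are parsed via maybe_parse.

from sqlglot import exp

case = (
    exp.Case()
    .when("x = 1", "'one'")   # condition and result parsed with maybe_parse
    .else_("'other'")
)
print(case.sql())  # CASE WHEN x = 1 THEN 'one' ELSE 'other' END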
    @@ -36285,23 +36785,23 @@ name is set to the expression's class name transformed to snake case.
    -
    3863class Cast(Func):
    -3864    arg_types = {"this": True, "to": True}
    -3865
    -3866    @property
    -3867    def name(self) -> str:
    -3868        return self.this.name
    -3869
    -3870    @property
    -3871    def to(self) -> DataType:
    -3872        return self.args["to"]
    -3873
    -3874    @property
    -3875    def output_name(self) -> str:
    -3876        return self.name
    -3877
    -3878    def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool:
    -3879        return self.to.is_type(*dtypes)
    +            
    3912class Cast(Func):
    +3913    arg_types = {"this": True, "to": True}
    +3914
    +3915    @property
    +3916    def name(self) -> str:
    +3917        return self.this.name
    +3918
    +3919    @property
    +3920    def to(self) -> DataType:
    +3921        return self.args["to"]
    +3922
    +3923    @property
    +3924    def output_name(self) -> str:
    +3925        return self.name
    +3926
    +3927    def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool:
    +3928        return self.to.is_type(*dtypes)
     
    @@ -36348,8 +36848,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3878    def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool:
    -3879        return self.to.is_type(*dtypes)
    +            
    3927    def is_type(self, *dtypes: str | DataType | DataType.Type) -> bool:
    +3928        return self.to.is_type(*dtypes)
     
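A minimal sketch (not part of this patch) of the Cast accessors shown above; the parsed input is an arbitrary illustrative expression.

from sqlglot import exp, parse_one

cast = parse_one("CAST(a AS INT)")
assert isinstance(cast, exp.Cast)
print(cast.name)            # a
print(cast.to.sql())        # INT
print(cast.is_type("int"))  # True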
    @@ -36435,8 +36935,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3882class CastToStrType(Func):
    -3883    arg_types = {"this": True, "expression": True}
    +            
    3931class CastToStrType(Func):
    +3932    arg_types = {"this": True, "expression": True}
     
    @@ -36522,8 +37022,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3886class Collate(Binary):
    -3887    pass
    +            
    3935class Collate(Binary):
    +3936    pass
     
    @@ -36602,8 +37102,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3890class TryCast(Cast):
    -3891    pass
    +            
    3939class TryCast(Cast):
    +3940    pass
     
    @@ -36693,9 +37193,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3894class Ceil(Func):
    -3895    arg_types = {"this": True, "decimals": False}
    -3896    _sql_names = ["CEIL", "CEILING"]
    +            
    3943class Ceil(Func):
    +3944    arg_types = {"this": True, "decimals": False}
    +3945    _sql_names = ["CEIL", "CEILING"]
     
    @@ -36781,9 +37281,10 @@ name is set to the expression's class name transformed to snake case.
    -
    3899class Coalesce(Func):
    -3900    arg_types = {"this": True, "expressions": False}
    -3901    is_var_len_args = True
    +            
    3948class Coalesce(Func):
    +3949    arg_types = {"this": True, "expressions": False}
    +3950    is_var_len_args = True
    +3951    _sql_names = ["COALESCE", "IFNULL", "NVL"]
     
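A minimal sketch (not part of this patch): because IFNULL and NVL are listed in _sql_names above, both names parse into this Coalesce node in the default dialect.

from sqlglot import parse_one

node = parse_one("IFNULL(a, 0)")
print(type(node).__name__)  # Coalesce
print(node.sql())           # COALESCE(a, 0)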
    @@ -36869,9 +37370,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3904class Concat(Func):
    -3905    arg_types = {"expressions": True}
    -3906    is_var_len_args = True
    +            
    3954class Concat(Func):
    +3955    arg_types = {"expressions": True}
    +3956    is_var_len_args = True
     
    @@ -36942,6 +37443,93 @@ name is set to the expression's class name transformed to snake case.
    neq
    rlike
    +
    3959class SafeConcat(Concat):
    +3960    pass
    +
    @@ -36957,8 +37545,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3909class ConcatWs(Concat):
    -3910    _sql_names = ["CONCAT_WS"]
    +            
    3963class ConcatWs(Concat):
    +3964    _sql_names = ["CONCAT_WS"]
     
    @@ -37044,8 +37632,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3913class Count(AggFunc):
    -3914    arg_types = {"this": False}
    +            
    3967class Count(AggFunc):
    +3968    arg_types = {"this": False, "expressions": False}
    +3969    is_var_len_args = True
     
    @@ -37131,8 +37720,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3917class CountIf(AggFunc):
    -3918    pass
    +            
    3972class CountIf(AggFunc):
    +3973    pass
     
    @@ -37218,8 +37807,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3921class CurrentDate(Func):
    -3922    arg_types = {"this": False}
    +            
    3976class CurrentDate(Func):
    +3977    arg_types = {"this": False}
     
    @@ -37305,8 +37894,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3925class CurrentDatetime(Func):
    -3926    arg_types = {"this": False}
    +            
    3980class CurrentDatetime(Func):
    +3981    arg_types = {"this": False}
     
    @@ -37392,8 +37981,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3929class CurrentTime(Func):
    -3930    arg_types = {"this": False}
    +            
    3984class CurrentTime(Func):
    +3985    arg_types = {"this": False}
     
    @@ -37479,8 +38068,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3933class CurrentTimestamp(Func):
    -3934    arg_types = {"this": False}
    +            
    3988class CurrentTimestamp(Func):
    +3989    arg_types = {"this": False}
     
    @@ -37566,8 +38155,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3937class CurrentUser(Func):
    -3938    arg_types = {"this": False}
    +            
    3992class CurrentUser(Func):
    +3993    arg_types = {"this": False}
     
    @@ -37653,8 +38242,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3941class DateAdd(Func, TimeUnit):
    -3942    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    3996class DateAdd(Func, TimeUnit):
    +3997    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -37743,8 +38332,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3945class DateSub(Func, TimeUnit):
    -3946    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4000class DateSub(Func, TimeUnit):
    +4001    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -37833,9 +38422,9 @@ name is set to the expression's class name transformed to snake case.
    -
    3949class DateDiff(Func, TimeUnit):
    -3950    _sql_names = ["DATEDIFF", "DATE_DIFF"]
    -3951    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4004class DateDiff(Func, TimeUnit):
    +4005    _sql_names = ["DATEDIFF", "DATE_DIFF"]
    +4006    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -37924,8 +38513,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3954class DateTrunc(Func):
    -3955    arg_types = {"unit": True, "this": True, "zone": False}
    +            
    4009class DateTrunc(Func):
    +4010    arg_types = {"unit": True, "this": True, "zone": False}
     
    @@ -38011,8 +38600,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3958class DatetimeAdd(Func, TimeUnit):
    -3959    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4013class DatetimeAdd(Func, TimeUnit):
    +4014    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -38101,8 +38690,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3962class DatetimeSub(Func, TimeUnit):
    -3963    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4017class DatetimeSub(Func, TimeUnit):
    +4018    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -38191,8 +38780,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3966class DatetimeDiff(Func, TimeUnit):
    -3967    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4021class DatetimeDiff(Func, TimeUnit):
    +4022    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -38281,8 +38870,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3970class DatetimeTrunc(Func, TimeUnit):
    -3971    arg_types = {"this": True, "unit": True, "zone": False}
    +            
    4025class DatetimeTrunc(Func, TimeUnit):
    +4026    arg_types = {"this": True, "unit": True, "zone": False}
     
    @@ -38371,8 +38960,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3974class DayOfWeek(Func):
    -3975    _sql_names = ["DAY_OF_WEEK", "DAYOFWEEK"]
    +            
    4029class DayOfWeek(Func):
    +4030    _sql_names = ["DAY_OF_WEEK", "DAYOFWEEK"]
     
    @@ -38458,8 +39047,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3978class DayOfMonth(Func):
    -3979    _sql_names = ["DAY_OF_MONTH", "DAYOFMONTH"]
    +            
    4033class DayOfMonth(Func):
    +4034    _sql_names = ["DAY_OF_MONTH", "DAYOFMONTH"]
     
    @@ -38545,8 +39134,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3982class DayOfYear(Func):
    -3983    _sql_names = ["DAY_OF_YEAR", "DAYOFYEAR"]
    +            
    4037class DayOfYear(Func):
    +4038    _sql_names = ["DAY_OF_YEAR", "DAYOFYEAR"]
     
    @@ -38632,8 +39221,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3986class WeekOfYear(Func):
    -3987    _sql_names = ["WEEK_OF_YEAR", "WEEKOFYEAR"]
    +            
    4041class WeekOfYear(Func):
    +4042    _sql_names = ["WEEK_OF_YEAR", "WEEKOFYEAR"]
     
    @@ -38719,8 +39308,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3990class LastDateOfMonth(Func):
    -3991    pass
    +            
    4045class LastDateOfMonth(Func):
    +4046    pass
     
    @@ -38806,8 +39395,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3994class Extract(Func):
    -3995    arg_types = {"this": True, "expression": True}
    +            
    4049class Extract(Func):
    +4050    arg_types = {"this": True, "expression": True}
     
    @@ -38893,8 +39482,8 @@ name is set to the expression's class name transformed to snake case.
    -
    3998class TimestampAdd(Func, TimeUnit):
    -3999    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4053class TimestampAdd(Func, TimeUnit):
    +4054    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -38983,8 +39572,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4002class TimestampSub(Func, TimeUnit):
    -4003    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4057class TimestampSub(Func, TimeUnit):
    +4058    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -39073,8 +39662,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4006class TimestampDiff(Func, TimeUnit):
    -4007    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4061class TimestampDiff(Func, TimeUnit):
    +4062    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -39163,8 +39752,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4010class TimestampTrunc(Func, TimeUnit):
    -4011    arg_types = {"this": True, "unit": True, "zone": False}
    +            
    4065class TimestampTrunc(Func, TimeUnit):
    +4066    arg_types = {"this": True, "unit": True, "zone": False}
     
    @@ -39253,8 +39842,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4014class TimeAdd(Func, TimeUnit):
    -4015    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4069class TimeAdd(Func, TimeUnit):
    +4070    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -39343,8 +39932,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4018class TimeSub(Func, TimeUnit):
    -4019    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4073class TimeSub(Func, TimeUnit):
    +4074    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -39433,8 +40022,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4022class TimeDiff(Func, TimeUnit):
    -4023    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4077class TimeDiff(Func, TimeUnit):
    +4078    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -39523,8 +40112,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4026class TimeTrunc(Func, TimeUnit):
    -4027    arg_types = {"this": True, "unit": True, "zone": False}
    +            
    4081class TimeTrunc(Func, TimeUnit):
    +4082    arg_types = {"this": True, "unit": True, "zone": False}
     
    @@ -39613,9 +40202,9 @@ name is set to the expression's class name transformed to snake case.
    -
    4030class DateFromParts(Func):
    -4031    _sql_names = ["DATEFROMPARTS"]
    -4032    arg_types = {"year": True, "month": True, "day": True}
    +            
    4085class DateFromParts(Func):
    +4086    _sql_names = ["DATEFROMPARTS"]
    +4087    arg_types = {"year": True, "month": True, "day": True}
     
    @@ -39701,8 +40290,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4035class DateStrToDate(Func):
    -4036    pass
    +            
    4090class DateStrToDate(Func):
    +4091    pass
     
    @@ -39788,8 +40377,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4039class DateToDateStr(Func):
    -4040    pass
    +            
    4094class DateToDateStr(Func):
    +4095    pass
     
    @@ -39875,8 +40464,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4043class DateToDi(Func):
    -4044    pass
    +            
    4098class DateToDi(Func):
    +4099    pass
     
    @@ -39947,6 +40536,94 @@ name is set to the expression's class name transformed to snake case.
    neq
    rlike
    +
    4102class Date(Func):
    +4103    arg_types = {"expressions": True}
    +4104    is_var_len_args = True
    +
    @@ -39962,8 +40639,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4047class Day(Func):
    -4048    pass
    +            
    4107class Day(Func):
    +4108    pass
     
    @@ -40049,8 +40726,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4051class Decode(Func):
    -4052    arg_types = {"this": True, "charset": True, "replace": False}
    +            
    4111class Decode(Func):
    +4112    arg_types = {"this": True, "charset": True, "replace": False}
     
    @@ -40136,8 +40813,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4055class DiToDate(Func):
    -4056    pass
    +            
    4115class DiToDate(Func):
    +4116    pass
     
    @@ -40223,8 +40900,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4059class Encode(Func):
    -4060    arg_types = {"this": True, "charset": True}
    +            
    4119class Encode(Func):
    +4120    arg_types = {"this": True, "charset": True}
     
    @@ -40310,8 +40987,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4063class Exp(Func):
    -4064    pass
    +            
    4123class Exp(Func):
    +4124    pass
     
    @@ -40397,8 +41074,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4067class Explode(Func):
    -4068    pass
    +            
    4127class Explode(Func):
    +4128    pass
     
    @@ -40484,8 +41161,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4071class Floor(Func):
    -4072    arg_types = {"this": True, "decimals": False}
    +            
    4131class Floor(Func):
    +4132    arg_types = {"this": True, "decimals": False}
     
    @@ -40571,8 +41248,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4075class FromBase64(Func):
    -4076    pass
    +            
    4135class FromBase64(Func):
    +4136    pass
     
    @@ -40658,8 +41335,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4079class ToBase64(Func):
    -4080    pass
    +            
    4139class ToBase64(Func):
    +4140    pass
     
    @@ -40745,9 +41422,9 @@ name is set to the expression's class name transformed to snake case.
    -
    4083class Greatest(Func):
    -4084    arg_types = {"this": True, "expressions": False}
    -4085    is_var_len_args = True
    +            
    4143class Greatest(Func):
    +4144    arg_types = {"this": True, "expressions": False}
    +4145    is_var_len_args = True
     
    @@ -40833,8 +41510,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4088class GroupConcat(Func):
    -4089    arg_types = {"this": True, "separator": False}
    +            
    4148class GroupConcat(Func):
    +4149    arg_types = {"this": True, "separator": False}
     
    @@ -40920,8 +41597,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4092class Hex(Func):
    -4093    pass
    +            
    4152class Hex(Func):
    +4153    pass
     
    @@ -41007,8 +41684,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4096class If(Func):
    -4097    arg_types = {"this": True, "true": True, "false": False}
    +            
    4156class If(Func):
    +4157    arg_types = {"this": True, "true": True, "false": False}
     
    @@ -41079,94 +41756,6 @@ name is set to the expression's class name transformed to snake case.
    neq
    rlike
    -
    4100class IfNull(Func):
    -4101    arg_types = {"this": True, "expression": False}
    -4102    _sql_names = ["IFNULL", "NVL"]
    -
    @@ -41182,8 +41771,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4105class Initcap(Func):
    -4106    arg_types = {"this": True, "expression": False}
    +            
    4160class Initcap(Func):
    +4161    arg_types = {"this": True, "expression": False}
     
    @@ -41269,8 +41858,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4109class JSONKeyValue(Expression):
    -4110    arg_types = {"this": True, "expression": True}
    +            
    4164class JSONKeyValue(Expression):
    +4165    arg_types = {"this": True, "expression": True}
     
    @@ -41334,15 +41923,15 @@ name is set to the expression's class name transformed to snake case.
    -
    4113class JSONObject(Func):
    -4114    arg_types = {
    -4115        "expressions": False,
    -4116        "null_handling": False,
    -4117        "unique_keys": False,
    -4118        "return_type": False,
    -4119        "format_json": False,
    -4120        "encoding": False,
    -4121    }
    +            
    4168class JSONObject(Func):
    +4169    arg_types = {
    +4170        "expressions": False,
    +4171        "null_handling": False,
    +4172        "unique_keys": False,
    +4173        "return_type": False,
    +4174        "format_json": False,
    +4175        "encoding": False,
    +4176    }
     
    @@ -41428,8 +42017,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4124class OpenJSONColumnDef(Expression):
    -4125    arg_types = {"this": True, "kind": True, "path": False, "as_json": False}
    +            
    4179class OpenJSONColumnDef(Expression):
    +4180    arg_types = {"this": True, "kind": True, "path": False, "as_json": False}
     
    @@ -41493,8 +42082,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4128class OpenJSON(Func):
    -4129    arg_types = {"this": True, "path": False, "expressions": False}
    +            
    4183class OpenJSON(Func):
    +4184    arg_types = {"this": True, "path": False, "expressions": False}
     
    @@ -41580,8 +42169,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4132class JSONBContains(Binary):
    -4133    _sql_names = ["JSONB_CONTAINS"]
    +            
    4187class JSONBContains(Binary):
    +4188    _sql_names = ["JSONB_CONTAINS"]
     
    @@ -41660,8 +42249,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4136class JSONExtract(Binary, Func):
    -4137    _sql_names = ["JSON_EXTRACT"]
    +            
    4191class JSONExtract(Binary, Func):
    +4192    _sql_names = ["JSON_EXTRACT"]
     
    @@ -41747,8 +42336,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4140class JSONExtractScalar(JSONExtract):
    -4141    _sql_names = ["JSON_EXTRACT_SCALAR"]
    +            
    4195class JSONExtractScalar(JSONExtract):
    +4196    _sql_names = ["JSON_EXTRACT_SCALAR"]
     
    @@ -41834,8 +42423,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4144class JSONBExtract(JSONExtract):
    -4145    _sql_names = ["JSONB_EXTRACT"]
    +            
    4199class JSONBExtract(JSONExtract):
    +4200    _sql_names = ["JSONB_EXTRACT"]
     
    @@ -41921,8 +42510,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4148class JSONBExtractScalar(JSONExtract):
    -4149    _sql_names = ["JSONB_EXTRACT_SCALAR"]
    +            
    4203class JSONBExtractScalar(JSONExtract):
    +4204    _sql_names = ["JSONB_EXTRACT_SCALAR"]
     
    @@ -42008,9 +42597,9 @@ name is set to the expression's class name transformed to snake case.
    -
    4152class JSONFormat(Func):
    -4153    arg_types = {"this": False, "options": False}
    -4154    _sql_names = ["JSON_FORMAT"]
    +            
    4207class JSONFormat(Func):
    +4208    arg_types = {"this": False, "options": False}
    +4209    _sql_names = ["JSON_FORMAT"]
     
    @@ -42096,9 +42685,9 @@ name is set to the expression's class name transformed to snake case.
    -
    4157class Least(Func):
    -4158    arg_types = {"expressions": False}
    -4159    is_var_len_args = True
    +            
    4212class Least(Func):
    +4213    arg_types = {"expressions": False}
    +4214    is_var_len_args = True
     
    @@ -42184,8 +42773,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4162class Left(Func):
    -4163    arg_types = {"this": True, "expression": True}
    +            
    4217class Left(Func):
    +4218    arg_types = {"this": True, "expression": True}
     
    @@ -42271,8 +42860,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4166class Right(Func):
    -4167    arg_types = {"this": True, "expression": True}
    +            
    4221class Right(Func):
    +4222    arg_types = {"this": True, "expression": True}
     
    @@ -42358,8 +42947,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4170class Length(Func):
    -4171    pass
    +            
    4225class Length(Func):
    +4226    _sql_names = ["LENGTH", "LEN"]
     
    @@ -42445,14 +43034,14 @@ name is set to the expression's class name transformed to snake case.
    -
    4174class Levenshtein(Func):
    -4175    arg_types = {
    -4176        "this": True,
    -4177        "expression": False,
    -4178        "ins_cost": False,
    -4179        "del_cost": False,
    -4180        "sub_cost": False,
    -4181    }
    +            
    4229class Levenshtein(Func):
    +4230    arg_types = {
    +4231        "this": True,
    +4232        "expression": False,
    +4233        "ins_cost": False,
    +4234        "del_cost": False,
    +4235        "sub_cost": False,
    +4236    }
     
    @@ -42538,8 +43127,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4184class Ln(Func):
    -4185    pass
    +            
    4239class Ln(Func):
    +4240    pass
     
    @@ -42625,8 +43214,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4188class Log(Func):
    -4189    arg_types = {"this": True, "expression": False}
    +            
    4243class Log(Func):
    +4244    arg_types = {"this": True, "expression": False}
     
    @@ -42712,8 +43301,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4192class Log2(Func):
    -4193    pass
    +            
    4247class Log2(Func):
    +4248    pass
     
    @@ -42799,8 +43388,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4196class Log10(Func):
    -4197    pass
    +            
    4251class Log10(Func):
    +4252    pass
     
    @@ -42886,8 +43475,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4200class LogicalOr(AggFunc):
    -4201    _sql_names = ["LOGICAL_OR", "BOOL_OR", "BOOLOR_AGG"]
    +            
    4255class LogicalOr(AggFunc):
    +4256    _sql_names = ["LOGICAL_OR", "BOOL_OR", "BOOLOR_AGG"]
     
    @@ -42973,8 +43562,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4204class LogicalAnd(AggFunc):
    -4205    _sql_names = ["LOGICAL_AND", "BOOL_AND", "BOOLAND_AGG"]
    +            
    4259class LogicalAnd(AggFunc):
    +4260    _sql_names = ["LOGICAL_AND", "BOOL_AND", "BOOLAND_AGG"]
     
    @@ -43060,8 +43649,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4208class Lower(Func):
    -4209    _sql_names = ["LOWER", "LCASE"]
    +            
    4263class Lower(Func):
    +4264    _sql_names = ["LOWER", "LCASE"]
     
    @@ -43147,8 +43736,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4212class Map(Func):
    -4213    arg_types = {"keys": False, "values": False}
    +            
    4267class Map(Func):
    +4268    arg_types = {"keys": False, "values": False}
     
    @@ -43234,8 +43823,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4216class StarMap(Func):
    -4217    pass
    +            
    4271class StarMap(Func):
    +4272    pass
     
    @@ -43321,17 +43910,17 @@ name is set to the expression's class name transformed to snake case.
    -
    4220class VarMap(Func):
    -4221    arg_types = {"keys": True, "values": True}
    -4222    is_var_len_args = True
    -4223
    -4224    @property
    -4225    def keys(self) -> t.List[Expression]:
    -4226        return self.args["keys"].expressions
    -4227
    -4228    @property
    -4229    def values(self) -> t.List[Expression]:
    -4230        return self.args["values"].expressions
    +            
    4275class VarMap(Func):
    +4276    arg_types = {"keys": True, "values": True}
    +4277    is_var_len_args = True
    +4278
    +4279    @property
    +4280    def keys(self) -> t.List[Expression]:
    +4281        return self.args["keys"].expressions
    +4282
    +4283    @property
    +4284    def values(self) -> t.List[Expression]:
    +4285        return self.args["values"].expressions
     
    @@ -43417,8 +44006,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4234class MatchAgainst(Func):
    -4235    arg_types = {"this": True, "expressions": True, "modifier": False}
    +            
    4289class MatchAgainst(Func):
    +4290    arg_types = {"this": True, "expressions": True, "modifier": False}
     
    @@ -43504,9 +44093,9 @@ name is set to the expression's class name transformed to snake case.
    -
    4238class Max(AggFunc):
    -4239    arg_types = {"this": True, "expressions": False}
    -4240    is_var_len_args = True
    +            
    4293class Max(AggFunc):
    +4294    arg_types = {"this": True, "expressions": False}
    +4295    is_var_len_args = True
     
    @@ -43592,8 +44181,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4243class MD5(Func):
    -4244    _sql_names = ["MD5"]
    +            
    4298class MD5(Func):
    +4299    _sql_names = ["MD5"]
     
    @@ -43679,9 +44268,9 @@ name is set to the expression's class name transformed to snake case.
    -
    4247class Min(AggFunc):
    -4248    arg_types = {"this": True, "expressions": False}
    -4249    is_var_len_args = True
    +            
    4302class Min(AggFunc):
    +4303    arg_types = {"this": True, "expressions": False}
    +4304    is_var_len_args = True
     
    @@ -43767,8 +44356,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4252class Month(Func):
    -4253    pass
    +            
    4307class Month(Func):
    +4308    pass
     
    @@ -43854,8 +44443,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4256class Nvl2(Func):
    -4257    arg_types = {"this": True, "true": True, "false": False}
    +            
    4311class Nvl2(Func):
    +4312    arg_types = {"this": True, "true": True, "false": False}
     
    @@ -43941,8 +44530,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4260class Posexplode(Func):
    -4261    pass
    +            
    4315class Posexplode(Func):
    +4316    pass
     
    @@ -44028,8 +44617,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4264class Pow(Binary, Func):
    -4265    _sql_names = ["POWER", "POW"]
    +            
    4319class Pow(Binary, Func):
    +4320    _sql_names = ["POWER", "POW"]
     
    @@ -44115,8 +44704,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4268class PercentileCont(AggFunc):
    -4269    arg_types = {"this": True, "expression": False}
    +            
    4323class PercentileCont(AggFunc):
    +4324    arg_types = {"this": True, "expression": False}
     
    @@ -44202,8 +44791,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4272class PercentileDisc(AggFunc):
    -4273    arg_types = {"this": True, "expression": False}
    +            
    4327class PercentileDisc(AggFunc):
    +4328    arg_types = {"this": True, "expression": False}
     
    @@ -44289,8 +44878,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4276class Quantile(AggFunc):
    -4277    arg_types = {"this": True, "quantile": True}
    +            
    4331class Quantile(AggFunc):
    +4332    arg_types = {"this": True, "quantile": True}
     
    @@ -44376,8 +44965,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4280class ApproxQuantile(Quantile):
    -4281    arg_types = {"this": True, "quantile": True, "accuracy": False, "weight": False}
    +            
    4335class ApproxQuantile(Quantile):
    +4336    arg_types = {"this": True, "quantile": True, "accuracy": False, "weight": False}
     
    @@ -44463,8 +45052,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4284class RangeN(Func):
    -4285    arg_types = {"this": True, "expressions": True, "each": False}
    +            
    4339class RangeN(Func):
    +4340    arg_types = {"this": True, "expressions": True, "each": False}
     
    @@ -44550,10 +45139,10 @@ name is set to the expression's class name transformed to snake case.
    -
    4288class ReadCSV(Func):
    -4289    _sql_names = ["READ_CSV"]
    -4290    is_var_len_args = True
    -4291    arg_types = {"this": True, "expressions": False}
    +            
    4343class ReadCSV(Func):
    +4344    _sql_names = ["READ_CSV"]
    +4345    is_var_len_args = True
    +4346    arg_types = {"this": True, "expressions": False}
     
    @@ -44639,8 +45228,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4294class Reduce(Func):
    -4295    arg_types = {"this": True, "initial": True, "merge": True, "finish": False}
    +            
    4349class Reduce(Func):
    +4350    arg_types = {"this": True, "initial": True, "merge": True, "finish": False}
     
    @@ -44726,14 +45315,14 @@ name is set to the expression's class name transformed to snake case.
    -
    4298class RegexpExtract(Func):
    -4299    arg_types = {
    -4300        "this": True,
    -4301        "expression": True,
    -4302        "position": False,
    -4303        "occurrence": False,
    -4304        "group": False,
    -4305    }
    +            
    4353class RegexpExtract(Func):
    +4354    arg_types = {
    +4355        "this": True,
    +4356        "expression": True,
    +4357        "position": False,
    +4358        "occurrence": False,
    +4359        "group": False,
    +4360    }
     
    @@ -44819,8 +45408,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4308class RegexpLike(Func):
    -4309    arg_types = {"this": True, "expression": True, "flag": False}
    +            
    4363class RegexpLike(Func):
    +4364    arg_types = {"this": True, "expression": True, "flag": False}
     
    @@ -44906,8 +45495,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4312class RegexpILike(Func):
    -4313    arg_types = {"this": True, "expression": True, "flag": False}
    +            
    4367class RegexpILike(Func):
    +4368    arg_types = {"this": True, "expression": True, "flag": False}
     
    @@ -44993,8 +45582,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4318class RegexpSplit(Func):
    -4319    arg_types = {"this": True, "expression": True, "limit": False}
    +            
    4373class RegexpSplit(Func):
    +4374    arg_types = {"this": True, "expression": True, "limit": False}
     
    @@ -45080,8 +45669,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4322class Repeat(Func):
    -4323    arg_types = {"this": True, "times": True}
    +            
    4377class Repeat(Func):
    +4378    arg_types = {"this": True, "times": True}
     
    @@ -45167,8 +45756,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4326class Round(Func):
    -4327    arg_types = {"this": True, "decimals": False}
    +            
    4381class Round(Func):
    +4382    arg_types = {"this": True, "decimals": False}
     
    @@ -45254,8 +45843,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4330class RowNumber(Func):
    -4331    arg_types: t.Dict[str, t.Any] = {}
    +            
    4385class RowNumber(Func):
    +4386    arg_types: t.Dict[str, t.Any] = {}
     
    @@ -45341,8 +45930,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4334class SafeDivide(Func):
    -4335    arg_types = {"this": True, "expression": True}
    +            
    4389class SafeDivide(Func):
    +4390    arg_types = {"this": True, "expression": True}
     
    @@ -45428,8 +46017,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4338class SetAgg(AggFunc):
    -4339    pass
    +            
    4393class SetAgg(AggFunc):
    +4394    pass
     
    @@ -45515,8 +46104,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4342class SHA(Func):
    -4343    _sql_names = ["SHA", "SHA1"]
    +            
    4397class SHA(Func):
    +4398    _sql_names = ["SHA", "SHA1"]
     
    @@ -45602,9 +46191,9 @@ name is set to the expression's class name transformed to snake case.
    -
    4346class SHA2(Func):
    -4347    _sql_names = ["SHA2"]
    -4348    arg_types = {"this": True, "length": False}
    +            
    4401class SHA2(Func):
    +4402    _sql_names = ["SHA2"]
    +4403    arg_types = {"this": True, "length": False}
     
    @@ -45690,8 +46279,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4351class SortArray(Func):
    -4352    arg_types = {"this": True, "asc": False}
    +            
    4406class SortArray(Func):
    +4407    arg_types = {"this": True, "asc": False}
     
    @@ -45777,8 +46366,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4355class Split(Func):
    -4356    arg_types = {"this": True, "expression": True, "limit": False}
    +            
    4410class Split(Func):
    +4411    arg_types = {"this": True, "expression": True, "limit": False}
     
    @@ -45864,8 +46453,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4361class Substring(Func):
    -4362    arg_types = {"this": True, "start": False, "length": False}
    +            
    4416class Substring(Func):
    +4417    arg_types = {"this": True, "start": False, "length": False}
     
    @@ -45951,8 +46540,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4365class StandardHash(Func):
    -4366    arg_types = {"this": True, "expression": False}
    +            
    4420class StandardHash(Func):
    +4421    arg_types = {"this": True, "expression": False}
     
    @@ -46038,13 +46627,13 @@ name is set to the expression's class name transformed to snake case.
    -
    4369class StrPosition(Func):
    -4370    arg_types = {
    -4371        "this": True,
    -4372        "substr": True,
    -4373        "position": False,
    -4374        "instance": False,
    -4375    }
    +            
    4424class StrPosition(Func):
    +4425    arg_types = {
    +4426        "this": True,
    +4427        "substr": True,
    +4428        "position": False,
    +4429        "instance": False,
    +4430    }
     
    @@ -46130,8 +46719,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4378class StrToDate(Func):
    -4379    arg_types = {"this": True, "format": True}
    +            
    4433class StrToDate(Func):
    +4434    arg_types = {"this": True, "format": True}
     
    @@ -46217,8 +46806,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4382class StrToTime(Func):
    -4383    arg_types = {"this": True, "format": True}
    +            
    4437class StrToTime(Func):
    +4438    arg_types = {"this": True, "format": True}
     
    @@ -46304,8 +46893,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4388class StrToUnix(Func):
    -4389    arg_types = {"this": False, "format": False}
    +            
    4443class StrToUnix(Func):
    +4444    arg_types = {"this": False, "format": False}
     
    @@ -46391,8 +46980,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4392class NumberToStr(Func):
    -4393    arg_types = {"this": True, "format": True}
    +            
    4447class NumberToStr(Func):
    +4448    arg_types = {"this": True, "format": True}
     
    @@ -46463,6 +47052,93 @@ name is set to the expression's class name transformed to snake case.
    neq
    rlike
+            
4451class FromBase(Func):
+4452    arg_types = {"this": True, "expression": True}
 
@@ -46478,9 +47154,9 @@ name is set to the expression's class name transformed to snake case.
-
    4396class Struct(Func):
    -4397    arg_types = {"expressions": True}
    -4398    is_var_len_args = True
    +            
    4455class Struct(Func):
    +4456    arg_types = {"expressions": True}
    +4457    is_var_len_args = True
     
    @@ -46566,8 +47242,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4401class StructExtract(Func):
    -4402    arg_types = {"this": True, "expression": True}
    +            
    4460class StructExtract(Func):
    +4461    arg_types = {"this": True, "expression": True}
     
    @@ -46653,8 +47329,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4405class Sum(AggFunc):
    -4406    pass
    +            
    4464class Sum(AggFunc):
    +4465    pass
     
    @@ -46740,8 +47416,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4409class Sqrt(Func):
    -4410    pass
    +            
    4468class Sqrt(Func):
    +4469    pass
     
    @@ -46827,8 +47503,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4413class Stddev(AggFunc):
    -4414    pass
    +            
    4472class Stddev(AggFunc):
    +4473    pass
     
    @@ -46914,8 +47590,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4417class StddevPop(AggFunc):
    -4418    pass
    +            
    4476class StddevPop(AggFunc):
    +4477    pass
     
    @@ -47001,8 +47677,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4421class StddevSamp(AggFunc):
    -4422    pass
    +            
    4480class StddevSamp(AggFunc):
    +4481    pass
     
    @@ -47088,8 +47764,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4425class TimeToStr(Func):
    -4426    arg_types = {"this": True, "format": True}
    +            
    4484class TimeToStr(Func):
    +4485    arg_types = {"this": True, "format": True}
     
    @@ -47175,8 +47851,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4429class TimeToTimeStr(Func):
    -4430    pass
    +            
    4488class TimeToTimeStr(Func):
    +4489    pass
     
    @@ -47262,8 +47938,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4433class TimeToUnix(Func):
    -4434    pass
    +            
    4492class TimeToUnix(Func):
    +4493    pass
     
    @@ -47349,8 +48025,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4437class TimeStrToDate(Func):
    -4438    pass
    +            
    4496class TimeStrToDate(Func):
    +4497    pass
     
    @@ -47436,8 +48112,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4441class TimeStrToTime(Func):
    -4442    pass
    +            
    4500class TimeStrToTime(Func):
    +4501    pass
     
    @@ -47523,8 +48199,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4445class TimeStrToUnix(Func):
    -4446    pass
    +            
    4504class TimeStrToUnix(Func):
    +4505    pass
     
    @@ -47610,13 +48286,13 @@ name is set to the expression's class name transformed to snake case.
    -
    4449class Trim(Func):
    -4450    arg_types = {
    -4451        "this": True,
    -4452        "expression": False,
    -4453        "position": False,
    -4454        "collation": False,
    -4455    }
    +            
    4508class Trim(Func):
    +4509    arg_types = {
    +4510        "this": True,
    +4511        "expression": False,
    +4512        "position": False,
    +4513        "collation": False,
    +4514    }
     
    @@ -47702,8 +48378,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4458class TsOrDsAdd(Func, TimeUnit):
    -4459    arg_types = {"this": True, "expression": True, "unit": False}
    +            
    4517class TsOrDsAdd(Func, TimeUnit):
    +4518    arg_types = {"this": True, "expression": True, "unit": False}
     
    @@ -47792,8 +48468,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4462class TsOrDsToDateStr(Func):
    -4463    pass
    +            
    4521class TsOrDsToDateStr(Func):
    +4522    pass
     
    @@ -47879,8 +48555,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4466class TsOrDsToDate(Func):
    -4467    arg_types = {"this": True, "format": False}
    +            
    4525class TsOrDsToDate(Func):
    +4526    arg_types = {"this": True, "format": False}
     
    @@ -47966,8 +48642,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4470class TsOrDiToDi(Func):
    -4471    pass
    +            
    4529class TsOrDiToDi(Func):
    +4530    pass
     
    @@ -48053,8 +48729,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4474class Unhex(Func):
    -4475    pass
    +            
    4533class Unhex(Func):
    +4534    pass
     
    @@ -48140,8 +48816,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4478class UnixToStr(Func):
    -4479    arg_types = {"this": True, "format": False}
    +            
    4537class UnixToStr(Func):
    +4538    arg_types = {"this": True, "format": False}
     
    @@ -48227,12 +48903,12 @@ name is set to the expression's class name transformed to snake case.
    -
    4484class UnixToTime(Func):
    -4485    arg_types = {"this": True, "scale": False, "zone": False, "hours": False, "minutes": False}
    -4486
    -4487    SECONDS = Literal.string("seconds")
    -4488    MILLIS = Literal.string("millis")
    -4489    MICROS = Literal.string("micros")
    +            
    4543class UnixToTime(Func):
    +4544    arg_types = {"this": True, "scale": False, "zone": False, "hours": False, "minutes": False}
    +4545
    +4546    SECONDS = Literal.string("seconds")
    +4547    MILLIS = Literal.string("millis")
    +4548    MICROS = Literal.string("micros")
     
    @@ -48318,8 +48994,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4492class UnixToTimeStr(Func):
    -4493    pass
    +            
    4551class UnixToTimeStr(Func):
    +4552    pass
     
    @@ -48405,8 +49081,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4496class Upper(Func):
    -4497    _sql_names = ["UPPER", "UCASE"]
    +            
    4555class Upper(Func):
    +4556    _sql_names = ["UPPER", "UCASE"]
     
    @@ -48492,8 +49168,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4500class Variance(AggFunc):
    -4501    _sql_names = ["VARIANCE", "VARIANCE_SAMP", "VAR_SAMP"]
    +            
    4559class Variance(AggFunc):
    +4560    _sql_names = ["VARIANCE", "VARIANCE_SAMP", "VAR_SAMP"]
     
    @@ -48579,8 +49255,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4504class VariancePop(AggFunc):
    -4505    _sql_names = ["VARIANCE_POP", "VAR_POP"]
    +            
    4563class VariancePop(AggFunc):
    +4564    _sql_names = ["VARIANCE_POP", "VAR_POP"]
     
    @@ -48666,8 +49342,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4508class Week(Func):
    -4509    arg_types = {"this": True, "mode": False}
    +            
    4567class Week(Func):
    +4568    arg_types = {"this": True, "mode": False}
     
    @@ -48753,8 +49429,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4512class XMLTable(Func):
    -4513    arg_types = {"this": True, "passing": False, "columns": False, "by_ref": False}
    +            
    4571class XMLTable(Func):
    +4572    arg_types = {"this": True, "passing": False, "columns": False, "by_ref": False}
     
    @@ -48840,8 +49516,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4516class Year(Func):
    -4517    pass
    +            
    4575class Year(Func):
    +4576    pass
     
    @@ -48927,8 +49603,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4520class Use(Expression):
    -4521    arg_types = {"this": True, "kind": False}
    +            
    4579class Use(Expression):
    +4580    arg_types = {"this": True, "kind": False}
     
    @@ -48992,8 +49668,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4524class Merge(Expression):
    -4525    arg_types = {"this": True, "using": True, "on": True, "expressions": True}
    +            
    4583class Merge(Expression):
    +4584    arg_types = {"this": True, "using": True, "on": True, "expressions": True}
     
    @@ -49057,8 +49733,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4528class When(Func):
    -4529    arg_types = {"matched": True, "source": False, "condition": False, "then": True}
    +            
    4587class When(Func):
    +4588    arg_types = {"matched": True, "source": False, "condition": False, "then": True}
     
    @@ -49144,8 +49820,8 @@ name is set to the expression's class name transformed to snake case.
    -
    4534class NextValueFor(Func):
    -4535    arg_types = {"this": True, "order": False}
    +            
    4593class NextValueFor(Func):
    +4594    arg_types = {"this": True, "order": False}
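The hunks above only shift line numbers for the remaining `Func` subclasses (`SetAgg` through `NextValueFor`); each class just declares its `arg_types` (required vs. optional arguments) and, occasionally, alternate `_sql_names`. A minimal sketch of how such nodes are built and rendered, assuming the usual `sqlglot.exp` entry point (the column names below are illustrative):

    from sqlglot import exp

    # Substring declares arg_types = {"this": True, "start": False, "length": False},
    # so "this" is required while "start" and "length" may be omitted.
    node = exp.Substring(
        this=exp.column("x"),
        start=exp.Literal.number(1),
        length=exp.Literal.number(3),
    )
    print(node.sql())  # roughly: SUBSTRING(x, 1, 3)

    # _sql_names lets one class answer to several spellings, e.g. UPPER and UCASE.
    print(exp.Upper(this=exp.column("x")).sql())  # UPPER(x)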
     
    @@ -49231,52 +49907,52 @@ name is set to the expression's class name transformed to snake case.
    -
    4572def maybe_parse(
    -4573    sql_or_expression: ExpOrStr,
    -4574    *,
    -4575    into: t.Optional[IntoType] = None,
    -4576    dialect: DialectType = None,
    -4577    prefix: t.Optional[str] = None,
    -4578    copy: bool = False,
    -4579    **opts,
    -4580) -> Expression:
    -4581    """Gracefully handle a possible string or expression.
    -4582
    -4583    Example:
    -4584        >>> maybe_parse("1")
    -4585        (LITERAL this: 1, is_string: False)
    -4586        >>> maybe_parse(to_identifier("x"))
    -4587        (IDENTIFIER this: x, quoted: False)
    -4588
    -4589    Args:
    -4590        sql_or_expression: the SQL code string or an expression
    -4591        into: the SQLGlot Expression to parse into
    -4592        dialect: the dialect used to parse the input expressions (in the case that an
    -4593            input expression is a SQL string).
    -4594        prefix: a string to prefix the sql with before it gets parsed
    -4595            (automatically includes a space)
    -4596        copy: whether or not to copy the expression.
    -4597        **opts: other options to use to parse the input expressions (again, in the case
    -4598            that an input expression is a SQL string).
    -4599
    -4600    Returns:
    -4601        Expression: the parsed or given expression.
    -4602    """
    -4603    if isinstance(sql_or_expression, Expression):
    -4604        if copy:
    -4605            return sql_or_expression.copy()
    -4606        return sql_or_expression
    -4607
    -4608    if sql_or_expression is None:
    -4609        raise ParseError(f"SQL cannot be None")
    -4610
    -4611    import sqlglot
    -4612
    -4613    sql = str(sql_or_expression)
    -4614    if prefix:
    -4615        sql = f"{prefix} {sql}"
    -4616
    -4617    return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
    +            
    4631def maybe_parse(
    +4632    sql_or_expression: ExpOrStr,
    +4633    *,
    +4634    into: t.Optional[IntoType] = None,
    +4635    dialect: DialectType = None,
    +4636    prefix: t.Optional[str] = None,
    +4637    copy: bool = False,
    +4638    **opts,
    +4639) -> Expression:
    +4640    """Gracefully handle a possible string or expression.
    +4641
    +4642    Example:
    +4643        >>> maybe_parse("1")
    +4644        (LITERAL this: 1, is_string: False)
    +4645        >>> maybe_parse(to_identifier("x"))
    +4646        (IDENTIFIER this: x, quoted: False)
    +4647
    +4648    Args:
    +4649        sql_or_expression: the SQL code string or an expression
    +4650        into: the SQLGlot Expression to parse into
    +4651        dialect: the dialect used to parse the input expressions (in the case that an
    +4652            input expression is a SQL string).
    +4653        prefix: a string to prefix the sql with before it gets parsed
    +4654            (automatically includes a space)
    +4655        copy: whether or not to copy the expression.
    +4656        **opts: other options to use to parse the input expressions (again, in the case
    +4657            that an input expression is a SQL string).
    +4658
    +4659    Returns:
    +4660        Expression: the parsed or given expression.
    +4661    """
    +4662    if isinstance(sql_or_expression, Expression):
    +4663        if copy:
    +4664            return sql_or_expression.copy()
    +4665        return sql_or_expression
    +4666
    +4667    if sql_or_expression is None:
    +4668        raise ParseError(f"SQL cannot be None")
    +4669
    +4670    import sqlglot
    +4671
    +4672    sql = str(sql_or_expression)
    +4673    if prefix:
    +4674        sql = f"{prefix} {sql}"
    +4675
    +4676    return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
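The `maybe_parse` hunk above is a pure renumbering; for orientation, a short usage sketch based on the docstring shown in the diff (the `db.tbl` path is an illustrative value):

    from sqlglot import exp

    # A SQL string is parsed into an Expression...
    lit = exp.maybe_parse("1")
    print(repr(lit))  # (LITERAL this: 1, is_string: False)

    # ...while an existing Expression is returned as-is, or copied when copy=True.
    ident = exp.to_identifier("x")
    assert exp.maybe_parse(ident) is ident
    assert exp.maybe_parse(ident, copy=True) is not ident

    # `into` constrains what the string may parse into.
    print(exp.maybe_parse("db.tbl", into=exp.Table).sql())  # db.tbl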
     
    @@ -49328,32 +50004,32 @@ that an input expression is a SQL string).
    -
    4801def union(
    -4802    left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts
    -4803) -> Union:
    -4804    """
    -4805    Initializes a syntax tree from one UNION expression.
    -4806
    -4807    Example:
    -4808        >>> union("SELECT * FROM foo", "SELECT * FROM bla").sql()
    -4809        'SELECT * FROM foo UNION SELECT * FROM bla'
    -4810
    -4811    Args:
    -4812        left: the SQL code string corresponding to the left-hand side.
    -4813            If an `Expression` instance is passed, it will be used as-is.
    -4814        right: the SQL code string corresponding to the right-hand side.
    -4815            If an `Expression` instance is passed, it will be used as-is.
    -4816        distinct: set the DISTINCT flag if and only if this is true.
    -4817        dialect: the dialect used to parse the input expression.
    -4818        opts: other options to use to parse the input expressions.
    -4819
    -4820    Returns:
    -4821        The new Union instance.
    -4822    """
    -4823    left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
    -4824    right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
    -4825
    -4826    return Union(this=left, expression=right, distinct=distinct)
    +            
    4860def union(
    +4861    left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts
    +4862) -> Union:
    +4863    """
    +4864    Initializes a syntax tree from one UNION expression.
    +4865
    +4866    Example:
    +4867        >>> union("SELECT * FROM foo", "SELECT * FROM bla").sql()
    +4868        'SELECT * FROM foo UNION SELECT * FROM bla'
    +4869
    +4870    Args:
    +4871        left: the SQL code string corresponding to the left-hand side.
    +4872            If an `Expression` instance is passed, it will be used as-is.
    +4873        right: the SQL code string corresponding to the right-hand side.
    +4874            If an `Expression` instance is passed, it will be used as-is.
    +4875        distinct: set the DISTINCT flag if and only if this is true.
    +4876        dialect: the dialect used to parse the input expression.
    +4877        opts: other options to use to parse the input expressions.
    +4878
    +4879    Returns:
    +4880        The new Union instance.
    +4881    """
    +4882    left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
    +4883    right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
    +4884
    +4885    return Union(this=left, expression=right, distinct=distinct)
     
    @@ -49401,32 +50077,32 @@ If an Expression instance is passed, it w
    -
    4829def intersect(
    -4830    left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts
    -4831) -> Intersect:
    -4832    """
    -4833    Initializes a syntax tree from one INTERSECT expression.
    -4834
    -4835    Example:
    -4836        >>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql()
    -4837        'SELECT * FROM foo INTERSECT SELECT * FROM bla'
    -4838
    -4839    Args:
    -4840        left: the SQL code string corresponding to the left-hand side.
    -4841            If an `Expression` instance is passed, it will be used as-is.
    -4842        right: the SQL code string corresponding to the right-hand side.
    -4843            If an `Expression` instance is passed, it will be used as-is.
    -4844        distinct: set the DISTINCT flag if and only if this is true.
    -4845        dialect: the dialect used to parse the input expression.
    -4846        opts: other options to use to parse the input expressions.
    -4847
    -4848    Returns:
    -4849        The new Intersect instance.
    -4850    """
    -4851    left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
    -4852    right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
    -4853
    -4854    return Intersect(this=left, expression=right, distinct=distinct)
    +            
    4888def intersect(
    +4889    left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts
    +4890) -> Intersect:
    +4891    """
    +4892    Initializes a syntax tree from one INTERSECT expression.
    +4893
    +4894    Example:
    +4895        >>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql()
    +4896        'SELECT * FROM foo INTERSECT SELECT * FROM bla'
    +4897
    +4898    Args:
    +4899        left: the SQL code string corresponding to the left-hand side.
    +4900            If an `Expression` instance is passed, it will be used as-is.
    +4901        right: the SQL code string corresponding to the right-hand side.
    +4902            If an `Expression` instance is passed, it will be used as-is.
    +4903        distinct: set the DISTINCT flag if and only if this is true.
    +4904        dialect: the dialect used to parse the input expression.
    +4905        opts: other options to use to parse the input expressions.
    +4906
    +4907    Returns:
    +4908        The new Intersect instance.
    +4909    """
    +4910    left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
    +4911    right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
    +4912
    +4913    return Intersect(this=left, expression=right, distinct=distinct)
     
    @@ -49474,32 +50150,32 @@ If an Expression instance is passed, it w
    -
    4857def except_(
    -4858    left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts
    -4859) -> Except:
    -4860    """
    -4861    Initializes a syntax tree from one EXCEPT expression.
    -4862
    -4863    Example:
    -4864        >>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql()
    -4865        'SELECT * FROM foo EXCEPT SELECT * FROM bla'
    -4866
    -4867    Args:
    -4868        left: the SQL code string corresponding to the left-hand side.
    -4869            If an `Expression` instance is passed, it will be used as-is.
    -4870        right: the SQL code string corresponding to the right-hand side.
    -4871            If an `Expression` instance is passed, it will be used as-is.
    -4872        distinct: set the DISTINCT flag if and only if this is true.
    -4873        dialect: the dialect used to parse the input expression.
    -4874        opts: other options to use to parse the input expressions.
    -4875
    -4876    Returns:
    -4877        The new Except instance.
    -4878    """
    -4879    left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
    -4880    right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
    -4881
    -4882    return Except(this=left, expression=right, distinct=distinct)
    +            
    4916def except_(
    +4917    left: ExpOrStr, right: ExpOrStr, distinct: bool = True, dialect: DialectType = None, **opts
    +4918) -> Except:
    +4919    """
    +4920    Initializes a syntax tree from one EXCEPT expression.
    +4921
    +4922    Example:
    +4923        >>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql()
    +4924        'SELECT * FROM foo EXCEPT SELECT * FROM bla'
    +4925
    +4926    Args:
    +4927        left: the SQL code string corresponding to the left-hand side.
    +4928            If an `Expression` instance is passed, it will be used as-is.
    +4929        right: the SQL code string corresponding to the right-hand side.
    +4930            If an `Expression` instance is passed, it will be used as-is.
    +4931        distinct: set the DISTINCT flag if and only if this is true.
    +4932        dialect: the dialect used to parse the input expression.
    +4933        opts: other options to use to parse the input expressions.
    +4934
    +4935    Returns:
    +4936        The new Except instance.
    +4937    """
    +4938    left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
    +4939    right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
    +4940
    +4941    return Except(this=left, expression=right, distinct=distinct)
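The three set-operation builders renumbered above (`union`, `intersect`, `except_`) share the same signature; a brief combined sketch, following the docstring examples:

    from sqlglot import exp

    print(exp.union("SELECT * FROM foo", "SELECT * FROM bla").sql())
    # SELECT * FROM foo UNION SELECT * FROM bla

    # distinct=False switches to the ALL form.
    print(exp.union("SELECT a FROM t1", "SELECT a FROM t2", distinct=False).sql())
    # SELECT a FROM t1 UNION ALL SELECT a FROM t2

    print(exp.intersect("SELECT * FROM foo", "SELECT * FROM bla").sql())
    # SELECT * FROM foo INTERSECT SELECT * FROM bla
    print(exp.except_("SELECT * FROM foo", "SELECT * FROM bla").sql())
    # SELECT * FROM foo EXCEPT SELECT * FROM bla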
     
    @@ -49547,26 +50223,26 @@ If an Expression instance is passed, it w
    -
    4885def select(*expressions: ExpOrStr, dialect: DialectType = None, **opts) -> Select:
    -4886    """
    -4887    Initializes a syntax tree from one or multiple SELECT expressions.
    -4888
    -4889    Example:
    -4890        >>> select("col1", "col2").from_("tbl").sql()
    -4891        'SELECT col1, col2 FROM tbl'
    -4892
    -4893    Args:
    -4894        *expressions: the SQL code string to parse as the expressions of a
    -4895            SELECT statement. If an Expression instance is passed, this is used as-is.
    -4896        dialect: the dialect used to parse the input expressions (in the case that an
    -4897            input expression is a SQL string).
    -4898        **opts: other options to use to parse the input expressions (again, in the case
    -4899            that an input expression is a SQL string).
    -4900
    -4901    Returns:
    -4902        Select: the syntax tree for the SELECT statement.
    -4903    """
    -4904    return Select().select(*expressions, dialect=dialect, **opts)
    +            
    4944def select(*expressions: ExpOrStr, dialect: DialectType = None, **opts) -> Select:
    +4945    """
    +4946    Initializes a syntax tree from one or multiple SELECT expressions.
    +4947
    +4948    Example:
    +4949        >>> select("col1", "col2").from_("tbl").sql()
    +4950        'SELECT col1, col2 FROM tbl'
    +4951
    +4952    Args:
    +4953        *expressions: the SQL code string to parse as the expressions of a
    +4954            SELECT statement. If an Expression instance is passed, this is used as-is.
    +4955        dialect: the dialect used to parse the input expressions (in the case that an
    +4956            input expression is a SQL string).
    +4957        **opts: other options to use to parse the input expressions (again, in the case
    +4958            that an input expression is a SQL string).
    +4959
    +4960    Returns:
    +4961        Select: the syntax tree for the SELECT statement.
    +4962    """
    +4963    return Select().select(*expressions, dialect=dialect, **opts)
     
    @@ -49613,26 +50289,26 @@ that an input expression is a SQL string).
    -
    4907def from_(expression: ExpOrStr, dialect: DialectType = None, **opts) -> Select:
    -4908    """
    -4909    Initializes a syntax tree from a FROM expression.
    -4910
    -4911    Example:
    -4912        >>> from_("tbl").select("col1", "col2").sql()
    -4913        'SELECT col1, col2 FROM tbl'
    -4914
    -4915    Args:
    -4916        *expression: the SQL code string to parse as the FROM expressions of a
    -4917            SELECT statement. If an Expression instance is passed, this is used as-is.
    -4918        dialect: the dialect used to parse the input expression (in the case that the
    -4919            input expression is a SQL string).
    -4920        **opts: other options to use to parse the input expressions (again, in the case
    -4921            that the input expression is a SQL string).
    -4922
    -4923    Returns:
    -4924        Select: the syntax tree for the SELECT statement.
    -4925    """
    -4926    return Select().from_(expression, dialect=dialect, **opts)
    +            
    4966def from_(expression: ExpOrStr, dialect: DialectType = None, **opts) -> Select:
    +4967    """
    +4968    Initializes a syntax tree from a FROM expression.
    +4969
    +4970    Example:
    +4971        >>> from_("tbl").select("col1", "col2").sql()
    +4972        'SELECT col1, col2 FROM tbl'
    +4973
    +4974    Args:
    +4975        *expression: the SQL code string to parse as the FROM expressions of a
    +4976            SELECT statement. If an Expression instance is passed, this is used as-is.
    +4977        dialect: the dialect used to parse the input expression (in the case that the
    +4978            input expression is a SQL string).
    +4979        **opts: other options to use to parse the input expressions (again, in the case
    +4980            that the input expression is a SQL string).
    +4981
    +4982    Returns:
    +4983        Select: the syntax tree for the SELECT statement.
    +4984    """
    +4985    return Select().from_(expression, dialect=dialect, **opts)
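`select` and `from_` above are two entry points into the same builder chain; a small sketch (table and column names are placeholders):

    from sqlglot import exp

    print(exp.select("col1", "col2").from_("tbl").sql())
    # SELECT col1, col2 FROM tbl

    # Starting from the table instead, then adding projections and a filter.
    print(exp.from_("tbl").select("col1", "col2").where("col1 > 0").sql())
    # SELECT col1, col2 FROM tbl WHERE col1 > 0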
     
    @@ -49679,53 +50355,53 @@ that the input expression is a SQL string).
    -
    4929def update(
    -4930    table: str | Table,
    -4931    properties: dict,
    -4932    where: t.Optional[ExpOrStr] = None,
    -4933    from_: t.Optional[ExpOrStr] = None,
    -4934    dialect: DialectType = None,
    -4935    **opts,
    -4936) -> Update:
    -4937    """
    -4938    Creates an update statement.
    -4939
    -4940    Example:
    -4941        >>> update("my_table", {"x": 1, "y": "2", "z": None}, from_="baz", where="id > 1").sql()
    -4942        "UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1"
    -4943
    -4944    Args:
    -4945        *properties: dictionary of properties to set which are
    -4946            auto converted to sql objects eg None -> NULL
    -4947        where: sql conditional parsed into a WHERE statement
    -4948        from_: sql statement parsed into a FROM statement
    -4949        dialect: the dialect used to parse the input expressions.
    -4950        **opts: other options to use to parse the input expressions.
    -4951
    -4952    Returns:
    -4953        Update: the syntax tree for the UPDATE statement.
    -4954    """
    -4955    update_expr = Update(this=maybe_parse(table, into=Table, dialect=dialect))
    -4956    update_expr.set(
    -4957        "expressions",
    -4958        [
    -4959            EQ(this=maybe_parse(k, dialect=dialect, **opts), expression=convert(v))
    -4960            for k, v in properties.items()
    -4961        ],
    -4962    )
    -4963    if from_:
    -4964        update_expr.set(
    -4965            "from",
    -4966            maybe_parse(from_, into=From, dialect=dialect, prefix="FROM", **opts),
    -4967        )
    -4968    if isinstance(where, Condition):
    -4969        where = Where(this=where)
    -4970    if where:
    -4971        update_expr.set(
    -4972            "where",
    -4973            maybe_parse(where, into=Where, dialect=dialect, prefix="WHERE", **opts),
    -4974        )
    -4975    return update_expr
    +            
    4988def update(
    +4989    table: str | Table,
    +4990    properties: dict,
    +4991    where: t.Optional[ExpOrStr] = None,
    +4992    from_: t.Optional[ExpOrStr] = None,
    +4993    dialect: DialectType = None,
    +4994    **opts,
    +4995) -> Update:
    +4996    """
    +4997    Creates an update statement.
    +4998
    +4999    Example:
    +5000        >>> update("my_table", {"x": 1, "y": "2", "z": None}, from_="baz", where="id > 1").sql()
    +5001        "UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1"
    +5002
    +5003    Args:
    +5004        *properties: dictionary of properties to set which are
    +5005            auto converted to sql objects eg None -> NULL
    +5006        where: sql conditional parsed into a WHERE statement
    +5007        from_: sql statement parsed into a FROM statement
    +5008        dialect: the dialect used to parse the input expressions.
    +5009        **opts: other options to use to parse the input expressions.
    +5010
    +5011    Returns:
    +5012        Update: the syntax tree for the UPDATE statement.
    +5013    """
    +5014    update_expr = Update(this=maybe_parse(table, into=Table, dialect=dialect))
    +5015    update_expr.set(
    +5016        "expressions",
    +5017        [
    +5018            EQ(this=maybe_parse(k, dialect=dialect, **opts), expression=convert(v))
    +5019            for k, v in properties.items()
    +5020        ],
    +5021    )
    +5022    if from_:
    +5023        update_expr.set(
    +5024            "from",
    +5025            maybe_parse(from_, into=From, dialect=dialect, prefix="FROM", **opts),
    +5026        )
    +5027    if isinstance(where, Condition):
    +5028        where = Where(this=where)
    +5029    if where:
    +5030        update_expr.set(
    +5031            "where",
    +5032            maybe_parse(where, into=Where, dialect=dialect, prefix="WHERE", **opts),
    +5033        )
    +5034    return update_expr
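A quick sketch of the `update` builder renumbered above, taken from its docstring: plain Python values are converted to SQL literals (`None` becomes `NULL`):

    from sqlglot import exp

    stmt = exp.update(
        "my_table",
        {"x": 1, "y": "2", "z": None},
        from_="baz",
        where="id > 1",
    )
    print(stmt.sql())
    # UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1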
     
    @@ -49772,35 +50448,35 @@ auto converted to sql objects eg None -> NULL
    -
    4978def delete(
    -4979    table: ExpOrStr,
    -4980    where: t.Optional[ExpOrStr] = None,
    -4981    returning: t.Optional[ExpOrStr] = None,
    -4982    dialect: DialectType = None,
    -4983    **opts,
    -4984) -> Delete:
    -4985    """
    -4986    Builds a delete statement.
    -4987
    -4988    Example:
    -4989        >>> delete("my_table", where="id > 1").sql()
    -4990        'DELETE FROM my_table WHERE id > 1'
    -4991
    -4992    Args:
    -4993        where: sql conditional parsed into a WHERE statement
    -4994        returning: sql conditional parsed into a RETURNING statement
    -4995        dialect: the dialect used to parse the input expressions.
    -4996        **opts: other options to use to parse the input expressions.
    -4997
    -4998    Returns:
    -4999        Delete: the syntax tree for the DELETE statement.
    -5000    """
    -5001    delete_expr = Delete().delete(table, dialect=dialect, copy=False, **opts)
    -5002    if where:
    -5003        delete_expr = delete_expr.where(where, dialect=dialect, copy=False, **opts)
    -5004    if returning:
    -5005        delete_expr = delete_expr.returning(returning, dialect=dialect, copy=False, **opts)
    -5006    return delete_expr
    +            
    5037def delete(
    +5038    table: ExpOrStr,
    +5039    where: t.Optional[ExpOrStr] = None,
    +5040    returning: t.Optional[ExpOrStr] = None,
    +5041    dialect: DialectType = None,
    +5042    **opts,
    +5043) -> Delete:
    +5044    """
    +5045    Builds a delete statement.
    +5046
    +5047    Example:
    +5048        >>> delete("my_table", where="id > 1").sql()
    +5049        'DELETE FROM my_table WHERE id > 1'
    +5050
    +5051    Args:
    +5052        where: sql conditional parsed into a WHERE statement
    +5053        returning: sql conditional parsed into a RETURNING statement
    +5054        dialect: the dialect used to parse the input expressions.
    +5055        **opts: other options to use to parse the input expressions.
    +5056
    +5057    Returns:
    +5058        Delete: the syntax tree for the DELETE statement.
    +5059    """
    +5060    delete_expr = Delete().delete(table, dialect=dialect, copy=False, **opts)
    +5061    if where:
    +5062        delete_expr = delete_expr.where(where, dialect=dialect, copy=False, **opts)
    +5063    if returning:
    +5064        delete_expr = delete_expr.returning(returning, dialect=dialect, copy=False, **opts)
    +5065    return delete_expr
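Likewise for `delete`; the `returning` line below is an assumption about the default generator's output rather than a docstring example:

    from sqlglot import exp

    print(exp.delete("my_table", where="id > 1").sql())
    # DELETE FROM my_table WHERE id > 1

    # With a RETURNING clause (rendering may vary by dialect).
    print(exp.delete("my_table", where="id > 1", returning="*").sql())
    # e.g. DELETE FROM my_table WHERE id > 1 RETURNING *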
     
    @@ -49845,49 +50521,49 @@ auto converted to sql objects eg None -> NULL
    -
    5009def insert(
    -5010    expression: ExpOrStr,
    -5011    into: ExpOrStr,
    -5012    columns: t.Optional[t.Sequence[ExpOrStr]] = None,
    -5013    overwrite: t.Optional[bool] = None,
    -5014    dialect: DialectType = None,
    -5015    copy: bool = True,
    -5016    **opts,
    -5017) -> Insert:
    -5018    """
    -5019    Builds an INSERT statement.
    -5020
    -5021    Example:
    -5022        >>> insert("VALUES (1, 2, 3)", "tbl").sql()
    -5023        'INSERT INTO tbl VALUES (1, 2, 3)'
    -5024
    -5025    Args:
    -5026        expression: the sql string or expression of the INSERT statement
    -5027        into: the tbl to insert data to.
    -5028        columns: optionally the table's column names.
    -5029        overwrite: whether to INSERT OVERWRITE or not.
    -5030        dialect: the dialect used to parse the input expressions.
    -5031        copy: whether or not to copy the expression.
    -5032        **opts: other options to use to parse the input expressions.
    -5033
    -5034    Returns:
    -5035        Insert: the syntax tree for the INSERT statement.
    -5036    """
    -5037    expr = maybe_parse(expression, dialect=dialect, copy=copy, **opts)
    -5038    this: Table | Schema = maybe_parse(into, into=Table, dialect=dialect, copy=copy, **opts)
    -5039
    -5040    if columns:
    -5041        this = _apply_list_builder(
    -5042            *columns,
    -5043            instance=Schema(this=this),
    -5044            arg="expressions",
    -5045            into=Identifier,
    -5046            copy=False,
    -5047            dialect=dialect,
    -5048            **opts,
    -5049        )
    -5050
    -5051    return Insert(this=this, expression=expr, overwrite=overwrite)
    +            
    5068def insert(
    +5069    expression: ExpOrStr,
    +5070    into: ExpOrStr,
    +5071    columns: t.Optional[t.Sequence[ExpOrStr]] = None,
    +5072    overwrite: t.Optional[bool] = None,
    +5073    dialect: DialectType = None,
    +5074    copy: bool = True,
    +5075    **opts,
    +5076) -> Insert:
    +5077    """
    +5078    Builds an INSERT statement.
    +5079
    +5080    Example:
    +5081        >>> insert("VALUES (1, 2, 3)", "tbl").sql()
    +5082        'INSERT INTO tbl VALUES (1, 2, 3)'
    +5083
    +5084    Args:
    +5085        expression: the sql string or expression of the INSERT statement
    +5086        into: the tbl to insert data to.
    +5087        columns: optionally the table's column names.
    +5088        overwrite: whether to INSERT OVERWRITE or not.
    +5089        dialect: the dialect used to parse the input expressions.
    +5090        copy: whether or not to copy the expression.
    +5091        **opts: other options to use to parse the input expressions.
    +5092
    +5093    Returns:
    +5094        Insert: the syntax tree for the INSERT statement.
    +5095    """
    +5096    expr = maybe_parse(expression, dialect=dialect, copy=copy, **opts)
    +5097    this: Table | Schema = maybe_parse(into, into=Table, dialect=dialect, copy=copy, **opts)
    +5098
    +5099    if columns:
    +5100        this = _apply_list_builder(
    +5101            *columns,
    +5102            instance=Schema(this=this),
    +5103            arg="expressions",
    +5104            into=Identifier,
    +5105            copy=False,
    +5106            dialect=dialect,
    +5107            **opts,
    +5108        )
    +5109
    +5110    return Insert(this=this, expression=expr, overwrite=overwrite)
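And for `insert`; the column-list variant is a hedged extrapolation from the `columns` argument, not a docstring example:

    from sqlglot import exp

    print(exp.insert("VALUES (1, 2, 3)", "tbl").sql())
    # INSERT INTO tbl VALUES (1, 2, 3)

    print(exp.insert("VALUES (1, 2, 3)", "tbl", columns=["a", "b", "c"]).sql())
    # roughly: INSERT INTO tbl (a, b, c) VALUES (1, 2, 3)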
     
    @@ -49935,41 +50611,41 @@ auto converted to sql objects eg None -> NULL
    -
    5054def condition(
    -5055    expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts
    -5056) -> Condition:
    -5057    """
    -5058    Initialize a logical condition expression.
    -5059
    -5060    Example:
    -5061        >>> condition("x=1").sql()
    -5062        'x = 1'
    -5063
    -5064        This is helpful for composing larger logical syntax trees:
    -5065        >>> where = condition("x=1")
    -5066        >>> where = where.and_("y=1")
    -5067        >>> Select().from_("tbl").select("*").where(where).sql()
    -5068        'SELECT * FROM tbl WHERE x = 1 AND y = 1'
    -5069
    -5070    Args:
    -5071        *expression: the SQL code string to parse.
    -5072            If an Expression instance is passed, this is used as-is.
    -5073        dialect: the dialect used to parse the input expression (in the case that the
    -5074            input expression is a SQL string).
    -5075        copy: Whether or not to copy `expression` (only applies to expressions).
    -5076        **opts: other options to use to parse the input expressions (again, in the case
    -5077            that the input expression is a SQL string).
    -5078
    -5079    Returns:
    -5080        The new Condition instance
    -5081    """
    -5082    return maybe_parse(
    -5083        expression,
    -5084        into=Condition,
    -5085        dialect=dialect,
    -5086        copy=copy,
    -5087        **opts,
    -5088    )
    +            
    5113def condition(
    +5114    expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts
    +5115) -> Condition:
    +5116    """
    +5117    Initialize a logical condition expression.
    +5118
    +5119    Example:
    +5120        >>> condition("x=1").sql()
    +5121        'x = 1'
    +5122
    +5123        This is helpful for composing larger logical syntax trees:
    +5124        >>> where = condition("x=1")
    +5125        >>> where = where.and_("y=1")
    +5126        >>> Select().from_("tbl").select("*").where(where).sql()
    +5127        'SELECT * FROM tbl WHERE x = 1 AND y = 1'
    +5128
    +5129    Args:
    +5130        *expression: the SQL code string to parse.
    +5131            If an Expression instance is passed, this is used as-is.
    +5132        dialect: the dialect used to parse the input expression (in the case that the
    +5133            input expression is a SQL string).
    +5134        copy: Whether or not to copy `expression` (only applies to expressions).
    +5135        **opts: other options to use to parse the input expressions (again, in the case
    +5136            that the input expression is a SQL string).
    +5137
    +5138    Returns:
    +5139        The new Condition instance
    +5140    """
    +5141    return maybe_parse(
    +5142        expression,
    +5143        into=Condition,
    +5144        dialect=dialect,
    +5145        copy=copy,
    +5146        **opts,
    +5147    )
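`condition` is the building block the WHERE-style helpers reuse; a short sketch of the composition pattern from its docstring:

    from sqlglot import exp

    where = exp.condition("x=1")
    where = where.and_("y=1")
    print(exp.select("*").from_("tbl").where(where).sql())
    # SELECT * FROM tbl WHERE x = 1 AND y = 1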
     
    @@ -50027,27 +50703,27 @@ that the input expression is a SQL string).
    -
    5091def and_(
    -5092    *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts
    -5093) -> Condition:
    -5094    """
    -5095    Combine multiple conditions with an AND logical operator.
    -5096
    -5097    Example:
    -5098        >>> and_("x=1", and_("y=1", "z=1")).sql()
    -5099        'x = 1 AND (y = 1 AND z = 1)'
    -5100
    -5101    Args:
    -5102        *expressions: the SQL code strings to parse.
    -5103            If an Expression instance is passed, this is used as-is.
    -5104        dialect: the dialect used to parse the input expression.
    -5105        copy: whether or not to copy `expressions` (only applies to Expressions).
    -5106        **opts: other options to use to parse the input expressions.
    -5107
    -5108    Returns:
    -5109        And: the new condition
    -5110    """
    -5111    return t.cast(Condition, _combine(expressions, And, dialect, copy=copy, **opts))
    +            
    5150def and_(
    +5151    *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts
    +5152) -> Condition:
    +5153    """
    +5154    Combine multiple conditions with an AND logical operator.
    +5155
    +5156    Example:
    +5157        >>> and_("x=1", and_("y=1", "z=1")).sql()
    +5158        'x = 1 AND (y = 1 AND z = 1)'
    +5159
    +5160    Args:
    +5161        *expressions: the SQL code strings to parse.
    +5162            If an Expression instance is passed, this is used as-is.
    +5163        dialect: the dialect used to parse the input expression.
    +5164        copy: whether or not to copy `expressions` (only applies to Expressions).
    +5165        **opts: other options to use to parse the input expressions.
    +5166
    +5167    Returns:
    +5168        And: the new condition
    +5169    """
    +5170    return t.cast(Condition, _combine(expressions, And, dialect, copy=copy, **opts))
     
    @@ -50093,27 +50769,27 @@ If an Expression instance is passed, this is used as-is.
    -
    5114def or_(
    -5115    *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts
    -5116) -> Condition:
    -5117    """
    -5118    Combine multiple conditions with an OR logical operator.
    -5119
    -5120    Example:
    -5121        >>> or_("x=1", or_("y=1", "z=1")).sql()
    -5122        'x = 1 OR (y = 1 OR z = 1)'
    -5123
    -5124    Args:
    -5125        *expressions: the SQL code strings to parse.
    -5126            If an Expression instance is passed, this is used as-is.
    -5127        dialect: the dialect used to parse the input expression.
    -5128        copy: whether or not to copy `expressions` (only applies to Expressions).
    -5129        **opts: other options to use to parse the input expressions.
    -5130
    -5131    Returns:
    -5132        Or: the new condition
    -5133    """
    -5134    return t.cast(Condition, _combine(expressions, Or, dialect, copy=copy, **opts))
    +            
    5173def or_(
    +5174    *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts
    +5175) -> Condition:
    +5176    """
    +5177    Combine multiple conditions with an OR logical operator.
    +5178
    +5179    Example:
    +5180        >>> or_("x=1", or_("y=1", "z=1")).sql()
    +5181        'x = 1 OR (y = 1 OR z = 1)'
    +5182
    +5183    Args:
    +5184        *expressions: the SQL code strings to parse.
    +5185            If an Expression instance is passed, this is used as-is.
    +5186        dialect: the dialect used to parse the input expression.
    +5187        copy: whether or not to copy `expressions` (only applies to Expressions).
    +5188        **opts: other options to use to parse the input expressions.
    +5189
    +5190    Returns:
    +5191        Or: the new condition
    +5192    """
    +5193    return t.cast(Condition, _combine(expressions, Or, dialect, copy=copy, **opts))
     
    @@ -50159,31 +50835,31 @@ If an Expression instance is passed, this is used as-is.
    -
    5137def not_(expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts) -> Not:
    -5138    """
    -5139    Wrap a condition with a NOT operator.
    -5140
    -5141    Example:
    -5142        >>> not_("this_suit='black'").sql()
    -5143        "NOT this_suit = 'black'"
    -5144
    -5145    Args:
    -5146        expression: the SQL code string to parse.
    -5147            If an Expression instance is passed, this is used as-is.
    -5148        dialect: the dialect used to parse the input expression.
    -5149        copy: whether to copy the expression or not.
    -5150        **opts: other options to use to parse the input expressions.
    -5151
    -5152    Returns:
    -5153        The new condition.
    -5154    """
    -5155    this = condition(
    -5156        expression,
    -5157        dialect=dialect,
    -5158        copy=copy,
    -5159        **opts,
    -5160    )
    -5161    return Not(this=_wrap(this, Connector))
    +            
    5196def not_(expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts) -> Not:
    +5197    """
    +5198    Wrap a condition with a NOT operator.
    +5199
    +5200    Example:
    +5201        >>> not_("this_suit='black'").sql()
    +5202        "NOT this_suit = 'black'"
    +5203
    +5204    Args:
    +5205        expression: the SQL code string to parse.
    +5206            If an Expression instance is passed, this is used as-is.
    +5207        dialect: the dialect used to parse the input expression.
    +5208        copy: whether to copy the expression or not.
    +5209        **opts: other options to use to parse the input expressions.
    +5210
    +5211    Returns:
    +5212        The new condition.
    +5213    """
    +5214    this = condition(
    +5215        expression,
    +5216        dialect=dialect,
    +5217        copy=copy,
    +5218        **opts,
    +5219    )
    +5220    return Not(this=_wrap(this, Connector))
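The `and_`, `or_` and `not_` builders renumbered above compose the same way; nested connectors are parenthesized automatically:

    from sqlglot import exp

    cond = exp.and_("x = 1", exp.or_("y = 1", "z = 1"))
    print(cond.sql())            # x = 1 AND (y = 1 OR z = 1)

    print(exp.not_(cond).sql())  # NOT (x = 1 AND (y = 1 OR z = 1))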
     
    @@ -50229,23 +50905,23 @@ If an Expression instance is passed, this is used as-is.
    -
    5164def paren(expression: ExpOrStr, copy: bool = True) -> Paren:
    -5165    """
    -5166    Wrap an expression in parentheses.
    -5167
    -5168    Example:
    -5169        >>> paren("5 + 3").sql()
    -5170        '(5 + 3)'
    -5171
    -5172    Args:
    -5173        expression: the SQL code string to parse.
    -5174            If an Expression instance is passed, this is used as-is.
    -5175        copy: whether to copy the expression or not.
    -5176
    -5177    Returns:
    -5178        The wrapped expression.
    -5179    """
    -5180    return Paren(this=maybe_parse(expression, copy=copy))
    +            
    5223def paren(expression: ExpOrStr, copy: bool = True) -> Paren:
    +5224    """
    +5225    Wrap an expression in parentheses.
    +5226
    +5227    Example:
    +5228        >>> paren("5 + 3").sql()
    +5229        '(5 + 3)'
    +5230
    +5231    Args:
    +5232        expression: the SQL code string to parse.
    +5233            If an Expression instance is passed, this is used as-is.
    +5234        copy: whether to copy the expression or not.
    +5235
    +5236    Returns:
    +5237        The wrapped expression.
    +5238    """
    +5239    return Paren(this=maybe_parse(expression, copy=copy))
     
    @@ -50289,31 +50965,31 @@ If an Expression instance is passed, this is used as-is.
    -
    5198def to_identifier(name, quoted=None, copy=True):
    -5199    """Builds an identifier.
    -5200
    -5201    Args:
    -5202        name: The name to turn into an identifier.
    -5203        quoted: Whether or not force quote the identifier.
    -5204        copy: Whether or not to copy a passed in Identefier node.
    -5205
    -5206    Returns:
    -5207        The identifier ast node.
    -5208    """
    -5209
    -5210    if name is None:
    -5211        return None
    -5212
    -5213    if isinstance(name, Identifier):
    -5214        identifier = _maybe_copy(name, copy)
    -5215    elif isinstance(name, str):
    -5216        identifier = Identifier(
    -5217            this=name,
    -5218            quoted=not SAFE_IDENTIFIER_RE.match(name) if quoted is None else quoted,
    -5219        )
    -5220    else:
    -5221        raise ValueError(f"Name needs to be a string or an Identifier, got: {name.__class__}")
    -5222    return identifier
    +            
    5257def to_identifier(name, quoted=None, copy=True):
    +5258    """Builds an identifier.
    +5259
    +5260    Args:
    +5261        name: The name to turn into an identifier.
    +5262        quoted: Whether or not force quote the identifier.
    +5263        copy: Whether or not to copy a passed in Identefier node.
    +5264
    +5265    Returns:
    +5266        The identifier ast node.
    +5267    """
    +5268
    +5269    if name is None:
    +5270        return None
    +5271
    +5272    if isinstance(name, Identifier):
    +5273        identifier = _maybe_copy(name, copy)
    +5274    elif isinstance(name, str):
    +5275        identifier = Identifier(
    +5276            this=name,
    +5277            quoted=not SAFE_IDENTIFIER_RE.match(name) if quoted is None else quoted,
    +5278        )
    +5279    else:
    +5280        raise ValueError(f"Name needs to be a string or an Identifier, got: {name.__class__}")
    +5281    return identifier
     
    @@ -50347,23 +51023,23 @@ If an Expression instance is passed, this is used as-is.
    -
    5228def to_interval(interval: str | Literal) -> Interval:
    -5229    """Builds an interval expression from a string like '1 day' or '5 months'."""
    -5230    if isinstance(interval, Literal):
    -5231        if not interval.is_string:
    -5232            raise ValueError("Invalid interval string.")
    -5233
    -5234        interval = interval.this
    -5235
    -5236    interval_parts = INTERVAL_STRING_RE.match(interval)  # type: ignore
    -5237
    -5238    if not interval_parts:
    -5239        raise ValueError("Invalid interval string.")
    -5240
    -5241    return Interval(
    -5242        this=Literal.string(interval_parts.group(1)),
    -5243        unit=Var(this=interval_parts.group(2)),
    -5244    )
    +            
    5287def to_interval(interval: str | Literal) -> Interval:
    +5288    """Builds an interval expression from a string like '1 day' or '5 months'."""
    +5289    if isinstance(interval, Literal):
    +5290        if not interval.is_string:
    +5291            raise ValueError("Invalid interval string.")
    +5292
    +5293        interval = interval.this
    +5294
    +5295    interval_parts = INTERVAL_STRING_RE.match(interval)  # type: ignore
    +5296
    +5297    if not interval_parts:
    +5298        raise ValueError("Invalid interval string.")
    +5299
    +5300    return Interval(
    +5301        this=Literal.string(interval_parts.group(1)),
    +5302        unit=Var(this=interval_parts.group(2)),
    +5303    )
     
    @@ -50383,32 +51059,32 @@ If an Expression instance is passed, this is used as-is.
    -
    5257def to_table(
    -5258    sql_path: t.Optional[str | Table], dialect: DialectType = None, **kwargs
    -5259) -> t.Optional[Table]:
    -5260    """
    -5261    Create a table expression from a `[catalog].[schema].[table]` sql path. Catalog and schema are optional.
    -5262    If a table is passed in then that table is returned.
    -5263
    -5264    Args:
    -5265        sql_path: a `[catalog].[schema].[table]` string.
    -5266        dialect: the source dialect according to which the table name will be parsed.
    -5267        kwargs: the kwargs to instantiate the resulting `Table` expression with.
    -5268
    -5269    Returns:
    -5270        A table expression.
    -5271    """
    -5272    if sql_path is None or isinstance(sql_path, Table):
    -5273        return sql_path
    -5274    if not isinstance(sql_path, str):
    -5275        raise ValueError(f"Invalid type provided for a table: {type(sql_path)}")
    -5276
    -5277    table = maybe_parse(sql_path, into=Table, dialect=dialect)
    -5278    if table:
    -5279        for k, v in kwargs.items():
    -5280            table.set(k, v)
    -5281
    -5282    return table
    +            
    5316def to_table(
    +5317    sql_path: t.Optional[str | Table], dialect: DialectType = None, **kwargs
    +5318) -> t.Optional[Table]:
    +5319    """
    +5320    Create a table expression from a `[catalog].[schema].[table]` sql path. Catalog and schema are optional.
    +5321    If a table is passed in then that table is returned.
    +5322
    +5323    Args:
    +5324        sql_path: a `[catalog].[schema].[table]` string.
    +5325        dialect: the source dialect according to which the table name will be parsed.
    +5326        kwargs: the kwargs to instantiate the resulting `Table` expression with.
    +5327
    +5328    Returns:
    +5329        A table expression.
    +5330    """
    +5331    if sql_path is None or isinstance(sql_path, Table):
    +5332        return sql_path
    +5333    if not isinstance(sql_path, str):
    +5334        raise ValueError(f"Invalid type provided for a table: {type(sql_path)}")
    +5335
    +5336    table = maybe_parse(sql_path, into=Table, dialect=dialect)
    +5337    if table:
    +5338        for k, v in kwargs.items():
    +5339            table.set(k, v)
    +5340
    +5341    return table
     
    @@ -50443,22 +51119,22 @@ If a table is passed in then that table is returned.

    -
    5285def to_column(sql_path: str | Column, **kwargs) -> Column:
    -5286    """
    -5287    Create a column from a `[table].[column]` sql path. Schema is optional.
    -5288
    -5289    If a column is passed in then that column is returned.
    -5290
    -5291    Args:
    -5292        sql_path: `[table].[column]` string
    -5293    Returns:
    -5294        Table: A column expression
    -5295    """
    -5296    if sql_path is None or isinstance(sql_path, Column):
    -5297        return sql_path
    -5298    if not isinstance(sql_path, str):
    -5299        raise ValueError(f"Invalid type provided for column: {type(sql_path)}")
    -5300    return column(*reversed(sql_path.split(".")), **kwargs)  # type: ignore
    +            
    5344def to_column(sql_path: str | Column, **kwargs) -> Column:
    +5345    """
    +5346    Create a column from a `[table].[column]` sql path. Schema is optional.
    +5347
    +5348    If a column is passed in then that column is returned.
    +5349
    +5350    Args:
    +5351        sql_path: `[table].[column]` string
    +5352    Returns:
    +5353        Table: A column expression
    +5354    """
    +5355    if sql_path is None or isinstance(sql_path, Column):
    +5356        return sql_path
    +5357    if not isinstance(sql_path, str):
    +5358        raise ValueError(f"Invalid type provided for column: {type(sql_path)}")
    +5359    return column(*reversed(sql_path.split(".")), **kwargs)  # type: ignore
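The `to_identifier` / `to_interval` / `to_table` / `to_column` helpers above all coerce plain strings into AST nodes; a hedged sketch (the exact interval rendering may differ):

    from sqlglot import exp

    print(exp.to_identifier("my col").sql())     # "my col" (quoted: contains a space)
    print(exp.to_interval("5 months").sql())     # e.g. INTERVAL '5' months
    print(exp.to_table("catalog.db.tbl").sql())  # catalog.db.tbl
    print(exp.to_column("tbl.col").sql())        # tbl.col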
     
    @@ -50492,61 +51168,61 @@ If a table is passed in then that table is returned.

    -
    5303def alias_(
    -5304    expression: ExpOrStr,
    -5305    alias: str | Identifier,
    -5306    table: bool | t.Sequence[str | Identifier] = False,
    -5307    quoted: t.Optional[bool] = None,
    -5308    dialect: DialectType = None,
    -5309    copy: bool = True,
    -5310    **opts,
    -5311):
    -5312    """Create an Alias expression.
    -5313
    -5314    Example:
    -5315        >>> alias_('foo', 'bar').sql()
    -5316        'foo AS bar'
    -5317
    -5318        >>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql()
    -5319        '(SELECT 1, 2) AS bar(a, b)'
    -5320
    -5321    Args:
    -5322        expression: the SQL code strings to parse.
    -5323            If an Expression instance is passed, this is used as-is.
    -5324        alias: the alias name to use. If the name has
    -5325            special characters it is quoted.
    -5326        table: Whether or not to create a table alias, can also be a list of columns.
    -5327        quoted: whether or not to quote the alias
    -5328        dialect: the dialect used to parse the input expression.
    -5329        copy: Whether or not to copy the expression.
    -5330        **opts: other options to use to parse the input expressions.
    -5331
    -5332    Returns:
    -5333        Alias: the aliased expression
    -5334    """
    -5335    exp = maybe_parse(expression, dialect=dialect, copy=copy, **opts)
    -5336    alias = to_identifier(alias, quoted=quoted)
    -5337
    -5338    if table:
    -5339        table_alias = TableAlias(this=alias)
    -5340        exp.set("alias", table_alias)
    -5341
    -5342        if not isinstance(table, bool):
    -5343            for column in table:
    -5344                table_alias.append("columns", to_identifier(column, quoted=quoted))
    -5345
    -5346        return exp
    -5347
    -5348    # We don't set the "alias" arg for Window expressions, because that would add an IDENTIFIER node in
    -5349    # the AST, representing a "named_window" [1] construct (eg. bigquery). What we want is an ALIAS node
    -5350    # for the complete Window expression.
    -5351    #
    -5352    # [1]: https://cloud.google.com/bigquery/docs/reference/standard-sql/window-function-calls
    -5353
    -5354    if "alias" in exp.arg_types and not isinstance(exp, Window):
    -5355        exp.set("alias", alias)
    -5356        return exp
    -5357    return Alias(this=exp, alias=alias)
    +            
    5362def alias_(
    +5363    expression: ExpOrStr,
    +5364    alias: str | Identifier,
    +5365    table: bool | t.Sequence[str | Identifier] = False,
    +5366    quoted: t.Optional[bool] = None,
    +5367    dialect: DialectType = None,
    +5368    copy: bool = True,
    +5369    **opts,
    +5370):
    +5371    """Create an Alias expression.
    +5372
    +5373    Example:
    +5374        >>> alias_('foo', 'bar').sql()
    +5375        'foo AS bar'
    +5376
    +5377        >>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql()
    +5378        '(SELECT 1, 2) AS bar(a, b)'
    +5379
    +5380    Args:
    +5381        expression: the SQL code strings to parse.
    +5382            If an Expression instance is passed, this is used as-is.
    +5383        alias: the alias name to use. If the name has
    +5384            special characters it is quoted.
    +5385        table: Whether or not to create a table alias, can also be a list of columns.
    +5386        quoted: whether or not to quote the alias
    +5387        dialect: the dialect used to parse the input expression.
    +5388        copy: Whether or not to copy the expression.
    +5389        **opts: other options to use to parse the input expressions.
    +5390
    +5391    Returns:
    +5392        Alias: the aliased expression
    +5393    """
    +5394    exp = maybe_parse(expression, dialect=dialect, copy=copy, **opts)
    +5395    alias = to_identifier(alias, quoted=quoted)
    +5396
    +5397    if table:
    +5398        table_alias = TableAlias(this=alias)
    +5399        exp.set("alias", table_alias)
    +5400
    +5401        if not isinstance(table, bool):
    +5402            for column in table:
    +5403                table_alias.append("columns", to_identifier(column, quoted=quoted))
    +5404
    +5405        return exp
    +5406
    +5407    # We don't set the "alias" arg for Window expressions, because that would add an IDENTIFIER node in
    +5408    # the AST, representing a "named_window" [1] construct (eg. bigquery). What we want is an ALIAS node
    +5409    # for the complete Window expression.
    +5410    #
    +5411    # [1]: https://cloud.google.com/bigquery/docs/reference/standard-sql/window-function-calls
    +5412
    +5413    if "alias" in exp.arg_types and not isinstance(exp, Window):
    +5414        exp.set("alias", alias)
    +5415        return exp
    +5416    return Alias(this=exp, alias=alias)
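A short illustration of both branches of `alias_` (plain alias vs. table alias with columns); the identifiers are made up, and the outputs follow the docstring examples above:

    from sqlglot import exp

    print(exp.alias_(exp.column("x"), "total", quoted=True).sql())
    # x AS "total"

    print(exp.alias_("(SELECT 1, 2)", "t", table=["a", "b"]).sql())
    # (SELECT 1, 2) AS t(a, b)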
     
    @@ -50602,32 +51278,32 @@ special characters it is quoted.
    -
    5360def subquery(
    -5361    expression: ExpOrStr,
    -5362    alias: t.Optional[Identifier | str] = None,
    -5363    dialect: DialectType = None,
    -5364    **opts,
    -5365) -> Select:
    -5366    """
    -5367    Build a subquery expression.
    -5368
    -5369    Example:
    -5370        >>> subquery('select x from tbl', 'bar').select('x').sql()
    -5371        'SELECT x FROM (SELECT x FROM tbl) AS bar'
    -5372
    -5373    Args:
    -5374        expression: the SQL code strings to parse.
    -5375            If an Expression instance is passed, this is used as-is.
    -5376        alias: the alias name to use.
    -5377        dialect: the dialect used to parse the input expression.
    -5378        **opts: other options to use to parse the input expressions.
    -5379
    -5380    Returns:
    -5381        A new Select instance with the subquery expression included.
    -5382    """
    -5383
    -5384    expression = maybe_parse(expression, dialect=dialect, **opts).subquery(alias)
    -5385    return Select().from_(expression, dialect=dialect, **opts)
    +            
    5419def subquery(
    +5420    expression: ExpOrStr,
    +5421    alias: t.Optional[Identifier | str] = None,
    +5422    dialect: DialectType = None,
    +5423    **opts,
    +5424) -> Select:
    +5425    """
    +5426    Build a subquery expression.
    +5427
    +5428    Example:
    +5429        >>> subquery('select x from tbl', 'bar').select('x').sql()
    +5430        'SELECT x FROM (SELECT x FROM tbl) AS bar'
    +5431
    +5432    Args:
    +5433        expression: the SQL code strings to parse.
    +5434            If an Expression instance is passed, this is used as-is.
    +5435        alias: the alias name to use.
    +5436        dialect: the dialect used to parse the input expression.
    +5437        **opts: other options to use to parse the input expressions.
    +5438
    +5439    Returns:
    +5440        A new Select instance with the subquery expression included.
    +5441    """
    +5442
    +5443    expression = maybe_parse(expression, dialect=dialect, **opts).subquery(alias)
    +5444    return Select().from_(expression, dialect=dialect, **opts)
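A sketch showing how the Select returned by `subquery` can be built up further (table and column names are invented):

    from sqlglot import exp

    q = exp.subquery("SELECT a, b FROM raw_events", "e").select("a").where("b > 10")
    print(q.sql())
    # SELECT a FROM (SELECT a, b FROM raw_events) AS e WHERE b > 10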
     
    @@ -50673,32 +51349,32 @@ If an Expression instance is passed, this is used as-is.
    -
    5388def column(
    -5389    col: str | Identifier,
    -5390    table: t.Optional[str | Identifier] = None,
    -5391    db: t.Optional[str | Identifier] = None,
    -5392    catalog: t.Optional[str | Identifier] = None,
    -5393    quoted: t.Optional[bool] = None,
    -5394) -> Column:
    -5395    """
    -5396    Build a Column.
    -5397
    -5398    Args:
    -5399        col: Column name.
    -5400        table: Table name.
    -5401        db: Database name.
    -5402        catalog: Catalog name.
    -5403        quoted: Whether to force quotes on the column's identifiers.
    -5404
    -5405    Returns:
    -5406        The new Column instance.
    -5407    """
    -5408    return Column(
    -5409        this=to_identifier(col, quoted=quoted),
    -5410        table=to_identifier(table, quoted=quoted),
    -5411        db=to_identifier(db, quoted=quoted),
    -5412        catalog=to_identifier(catalog, quoted=quoted),
    -5413    )
    +            
    5447def column(
    +5448    col: str | Identifier,
    +5449    table: t.Optional[str | Identifier] = None,
    +5450    db: t.Optional[str | Identifier] = None,
    +5451    catalog: t.Optional[str | Identifier] = None,
    +5452    quoted: t.Optional[bool] = None,
    +5453) -> Column:
    +5454    """
    +5455    Build a Column.
    +5456
    +5457    Args:
    +5458        col: Column name.
    +5459        table: Table name.
    +5460        db: Database name.
    +5461        catalog: Catalog name.
    +5462        quoted: Whether to force quotes on the column's identifiers.
    +5463
    +5464    Returns:
    +5465        The new Column instance.
    +5466    """
    +5467    return Column(
    +5468        this=to_identifier(col, quoted=quoted),
    +5469        table=to_identifier(table, quoted=quoted),
    +5470        db=to_identifier(db, quoted=quoted),
    +5471        catalog=to_identifier(catalog, quoted=quoted),
    +5472    )
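A minimal `column` sketch; `quoted=True` forces quoting of every part of the reference:

    from sqlglot import exp

    print(exp.column("id", table="users", db="app").sql())        # app.users.id
    print(exp.column("order id", table="t", quoted=True).sql())   # "t"."order id"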
     
    @@ -50734,22 +51410,22 @@ If an Expression instance is passed, this is used as-is.
    -
    5416def cast(expression: ExpOrStr, to: str | DataType | DataType.Type, **opts) -> Cast:
    -5417    """Cast an expression to a data type.
    -5418
    -5419    Example:
    -5420        >>> cast('x + 1', 'int').sql()
    -5421        'CAST(x + 1 AS INT)'
    -5422
    -5423    Args:
    -5424        expression: The expression to cast.
    -5425        to: The datatype to cast to.
    -5426
    -5427    Returns:
    -5428        The new Cast instance.
    -5429    """
    -5430    expression = maybe_parse(expression, **opts)
    -5431    return Cast(this=expression, to=DataType.build(to, **opts))
    +            
    5475def cast(expression: ExpOrStr, to: str | DataType | DataType.Type, **opts) -> Cast:
    +5476    """Cast an expression to a data type.
    +5477
    +5478    Example:
    +5479        >>> cast('x + 1', 'int').sql()
    +5480        'CAST(x + 1 AS INT)'
    +5481
    +5482    Args:
    +5483        expression: The expression to cast.
    +5484        to: The datatype to cast to.
    +5485
    +5486    Returns:
    +5487        The new Cast instance.
    +5488    """
    +5489    expression = maybe_parse(expression, **opts)
    +5490    return Cast(this=expression, to=DataType.build(to, **opts))
     
    @@ -50792,31 +51468,31 @@ If an Expression instance is passed, this is used as-is.
    -
    5434def table_(
    -5435    table: Identifier | str,
    -5436    db: t.Optional[Identifier | str] = None,
    -5437    catalog: t.Optional[Identifier | str] = None,
    -5438    quoted: t.Optional[bool] = None,
    -5439    alias: t.Optional[Identifier | str] = None,
    -5440) -> Table:
    -5441    """Build a Table.
    -5442
    -5443    Args:
    -5444        table: Table name.
    -5445        db: Database name.
    -5446        catalog: Catalog name.
    -5447        quote: Whether to force quotes on the table's identifiers.
    -5448        alias: Table's alias.
    -5449
    -5450    Returns:
    -5451        The new Table instance.
    -5452    """
    -5453    return Table(
    -5454        this=to_identifier(table, quoted=quoted),
    -5455        db=to_identifier(db, quoted=quoted),
    -5456        catalog=to_identifier(catalog, quoted=quoted),
    -5457        alias=TableAlias(this=to_identifier(alias)) if alias else None,
    -5458    )
    +            
    5493def table_(
    +5494    table: Identifier | str,
    +5495    db: t.Optional[Identifier | str] = None,
    +5496    catalog: t.Optional[Identifier | str] = None,
    +5497    quoted: t.Optional[bool] = None,
    +5498    alias: t.Optional[Identifier | str] = None,
    +5499) -> Table:
    +5500    """Build a Table.
    +5501
    +5502    Args:
    +5503        table: Table name.
    +5504        db: Database name.
    +5505        catalog: Catalog name.
    +5506        quoted: Whether to force quotes on the table's identifiers.
    +5507        alias: Table's alias.
    +5508
    +5509    Returns:
    +5510        The new Table instance.
    +5511    """
    +5512    return Table(
    +5513        this=to_identifier(table, quoted=quoted),
    +5514        db=to_identifier(db, quoted=quoted),
    +5515        catalog=to_identifier(catalog, quoted=quoted),
    +5516        alias=TableAlias(this=to_identifier(alias)) if alias else None,
    +5517    )
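A small `table_` sketch with placeholder names, showing the catalog/db parts and the optional alias:

    from sqlglot import exp

    print(exp.table_("orders", db="sales", catalog="prod").sql())  # prod.sales.orders
    print(exp.table_("orders", db="sales", alias="o").sql())       # sales.orders AS o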
     
    @@ -50852,37 +51528,37 @@ If an Expression instance is passed, this is used as-is.
    -
    5461def values(
    -5462    values: t.Iterable[t.Tuple[t.Any, ...]],
    -5463    alias: t.Optional[str] = None,
    -5464    columns: t.Optional[t.Iterable[str] | t.Dict[str, DataType]] = None,
    -5465) -> Values:
    -5466    """Build VALUES statement.
    -5467
    -5468    Example:
    -5469        >>> values([(1, '2')]).sql()
    -5470        "VALUES (1, '2')"
    -5471
    -5472    Args:
    -5473        values: values statements that will be converted to SQL
    -5474        alias: optional alias
    -5475        columns: Optional list of ordered column names or ordered dictionary of column names to types.
    -5476         If either are provided then an alias is also required.
    -5477
    -5478    Returns:
    -5479        Values: the Values expression object
    -5480    """
    -5481    if columns and not alias:
    -5482        raise ValueError("Alias is required when providing columns")
    -5483
    -5484    return Values(
    -5485        expressions=[convert(tup) for tup in values],
    -5486        alias=(
    -5487            TableAlias(this=to_identifier(alias), columns=[to_identifier(x) for x in columns])
    -5488            if columns
    -5489            else (TableAlias(this=to_identifier(alias)) if alias else None)
    -5490        ),
    -5491    )
    +            
    5520def values(
    +5521    values: t.Iterable[t.Tuple[t.Any, ...]],
    +5522    alias: t.Optional[str] = None,
    +5523    columns: t.Optional[t.Iterable[str] | t.Dict[str, DataType]] = None,
    +5524) -> Values:
    +5525    """Build VALUES statement.
    +5526
    +5527    Example:
    +5528        >>> values([(1, '2')]).sql()
    +5529        "VALUES (1, '2')"
    +5530
    +5531    Args:
    +5532        values: values statements that will be converted to SQL
    +5533        alias: optional alias
    +5534        columns: Optional list of ordered column names or ordered dictionary of column names to types.
    +5535         If either are provided then an alias is also required.
    +5536
    +5537    Returns:
    +5538        Values: the Values expression object
    +5539    """
    +5540    if columns and not alias:
    +5541        raise ValueError("Alias is required when providing columns")
    +5542
    +5543    return Values(
    +5544        expressions=[convert(tup) for tup in values],
    +5545        alias=(
    +5546            TableAlias(this=to_identifier(alias), columns=[to_identifier(x) for x in columns])
    +5547            if columns
    +5548            else (TableAlias(this=to_identifier(alias)) if alias else None)
    +5549        ),
    +5550    )
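A usage sketch for `values`; the first call mirrors the docstring, and the second shows how column names attach to the required alias:

    from sqlglot import exp

    print(exp.values([(1, "2")]).sql())   # VALUES (1, '2')

    # Column names require an alias; alias rendering (e.g. surrounding parentheses) may vary by dialect.
    aliased = exp.values([(1, "a"), (2, "b")], alias="t", columns=["id", "name"])
    print(aliased.sql())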
     
    @@ -50927,28 +51603,28 @@ If either are provided then an alias is also required.
    -
    5494def var(name: t.Optional[ExpOrStr]) -> Var:
    -5495    """Build a SQL variable.
    -5496
    -5497    Example:
    -5498        >>> repr(var('x'))
    -5499        '(VAR this: x)'
    -5500
    -5501        >>> repr(var(column('x', table='y')))
    -5502        '(VAR this: x)'
    -5503
    -5504    Args:
    -5505        name: The name of the var or an expression who's name will become the var.
    -5506
    -5507    Returns:
    -5508        The new variable node.
    -5509    """
    -5510    if not name:
    -5511        raise ValueError("Cannot convert empty name into var.")
    -5512
    -5513    if isinstance(name, Expression):
    -5514        name = name.name
    -5515    return Var(this=name)
    +            
    5553def var(name: t.Optional[ExpOrStr]) -> Var:
    +5554    """Build a SQL variable.
    +5555
    +5556    Example:
    +5557        >>> repr(var('x'))
    +5558        '(VAR this: x)'
    +5559
    +5560        >>> repr(var(column('x', table='y')))
    +5561        '(VAR this: x)'
    +5562
    +5563    Args:
    +5564        name: The name of the var or an expression whose name will become the var.
    +5565
    +5566    Returns:
    +5567        The new variable node.
    +5568    """
    +5569    if not name:
    +5570        raise ValueError("Cannot convert empty name into var.")
    +5571
    +5572    if isinstance(name, Expression):
    +5573        name = name.name
    +5574    return Var(this=name)
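A tiny `var` sketch; when an Expression is passed, only its name is kept (outputs match the docstring examples):

    from sqlglot import exp

    print(repr(exp.var("x")))                          # (VAR this: x)
    print(repr(exp.var(exp.column("x", table="y"))))   # (VAR this: x)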
     
    @@ -50996,24 +51672,24 @@ If either are provided then an alias is also required.
    -
    5518def rename_table(old_name: str | Table, new_name: str | Table) -> AlterTable:
    -5519    """Build ALTER TABLE... RENAME... expression
    -5520
    -5521    Args:
    -5522        old_name: The old name of the table
    -5523        new_name: The new name of the table
    -5524
    -5525    Returns:
    -5526        Alter table expression
    -5527    """
    -5528    old_table = to_table(old_name)
    -5529    new_table = to_table(new_name)
    -5530    return AlterTable(
    -5531        this=old_table,
    -5532        actions=[
    -5533            RenameTable(this=new_table),
    -5534        ],
    -5535    )
    +            
    5577def rename_table(old_name: str | Table, new_name: str | Table) -> AlterTable:
    +5578    """Build ALTER TABLE... RENAME... expression
    +5579
    +5580    Args:
    +5581        old_name: The old name of the table
    +5582        new_name: The new name of the table
    +5583
    +5584    Returns:
    +5585        Alter table expression
    +5586    """
    +5587    old_table = to_table(old_name)
    +5588    new_table = to_table(new_name)
    +5589    return AlterTable(
    +5590        this=old_table,
    +5591        actions=[
    +5592            RenameTable(this=new_table),
    +5593        ],
    +5594    )
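A sketch of `rename_table` output in the default dialect (table names are illustrative):

    from sqlglot import exp

    print(exp.rename_table("events_old", "events").sql())
    # ALTER TABLE events_old RENAME TO events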
     
    @@ -51046,46 +51722,46 @@ If either are provided then an alias is also required.
    -
    5538def convert(value: t.Any, copy: bool = False) -> Expression:
    -5539    """Convert a python value into an expression object.
    -5540
    -5541    Raises an error if a conversion is not possible.
    -5542
    -5543    Args:
    -5544        value: A python object.
    -5545        copy: Whether or not to copy `value` (only applies to Expressions and collections).
    -5546
    -5547    Returns:
    -5548        Expression: the equivalent expression object.
    -5549    """
    -5550    if isinstance(value, Expression):
    -5551        return _maybe_copy(value, copy)
    -5552    if isinstance(value, str):
    -5553        return Literal.string(value)
    -5554    if isinstance(value, bool):
    -5555        return Boolean(this=value)
    -5556    if value is None or (isinstance(value, float) and math.isnan(value)):
    -5557        return NULL
    -5558    if isinstance(value, numbers.Number):
    -5559        return Literal.number(value)
    -5560    if isinstance(value, datetime.datetime):
    -5561        datetime_literal = Literal.string(
    -5562            (value if value.tzinfo else value.replace(tzinfo=datetime.timezone.utc)).isoformat()
    -5563        )
    -5564        return TimeStrToTime(this=datetime_literal)
    -5565    if isinstance(value, datetime.date):
    -5566        date_literal = Literal.string(value.strftime("%Y-%m-%d"))
    -5567        return DateStrToDate(this=date_literal)
    -5568    if isinstance(value, tuple):
    -5569        return Tuple(expressions=[convert(v, copy=copy) for v in value])
    -5570    if isinstance(value, list):
    -5571        return Array(expressions=[convert(v, copy=copy) for v in value])
    -5572    if isinstance(value, dict):
    -5573        return Map(
    -5574            keys=[convert(k, copy=copy) for k in value],
    -5575            values=[convert(v, copy=copy) for v in value.values()],
    -5576        )
    -5577    raise ValueError(f"Cannot convert {value}")
    +            
    5597def convert(value: t.Any, copy: bool = False) -> Expression:
    +5598    """Convert a python value into an expression object.
    +5599
    +5600    Raises an error if a conversion is not possible.
    +5601
    +5602    Args:
    +5603        value: A python object.
    +5604        copy: Whether or not to copy `value` (only applies to Expressions and collections).
    +5605
    +5606    Returns:
    +5607        Expression: the equivalent expression object.
    +5608    """
    +5609    if isinstance(value, Expression):
    +5610        return _maybe_copy(value, copy)
    +5611    if isinstance(value, str):
    +5612        return Literal.string(value)
    +5613    if isinstance(value, bool):
    +5614        return Boolean(this=value)
    +5615    if value is None or (isinstance(value, float) and math.isnan(value)):
    +5616        return NULL
    +5617    if isinstance(value, numbers.Number):
    +5618        return Literal.number(value)
    +5619    if isinstance(value, datetime.datetime):
    +5620        datetime_literal = Literal.string(
    +5621            (value if value.tzinfo else value.replace(tzinfo=datetime.timezone.utc)).isoformat()
    +5622        )
    +5623        return TimeStrToTime(this=datetime_literal)
    +5624    if isinstance(value, datetime.date):
    +5625        date_literal = Literal.string(value.strftime("%Y-%m-%d"))
    +5626        return DateStrToDate(this=date_literal)
    +5627    if isinstance(value, tuple):
    +5628        return Tuple(expressions=[convert(v, copy=copy) for v in value])
    +5629    if isinstance(value, list):
    +5630        return Array(expressions=[convert(v, copy=copy) for v in value])
    +5631    if isinstance(value, dict):
    +5632        return Map(
    +5633            keys=[convert(k, copy=copy) for k in value],
    +5634            values=[convert(v, copy=copy) for v in value.values()],
    +5635        )
    +5636    raise ValueError(f"Cannot convert {value}")
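A few `convert` examples on plain Python values, showing the literal, boolean, NULL and tuple branches:

    from sqlglot import exp

    print(exp.convert("it's").sql())      # 'it''s'   (string literal, quote escaped)
    print(exp.convert(True).sql())        # TRUE
    print(exp.convert(None).sql())        # NULL
    print(exp.convert((1, 2, 3)).sql())   # (1, 2, 3)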
     
    @@ -51120,26 +51796,26 @@ If either are provided then an alias is also required.
    -
    5580def replace_children(expression: Expression, fun: t.Callable, *args, **kwargs) -> None:
    -5581    """
    -5582    Replace children of an expression with the result of a lambda fun(child) -> exp.
    -5583    """
    -5584    for k, v in expression.args.items():
    -5585        is_list_arg = type(v) is list
    -5586
    -5587        child_nodes = v if is_list_arg else [v]
    -5588        new_child_nodes = []
    -5589
    -5590        for cn in child_nodes:
    -5591            if isinstance(cn, Expression):
    -5592                for child_node in ensure_collection(fun(cn, *args, **kwargs)):
    -5593                    new_child_nodes.append(child_node)
    -5594                    child_node.parent = expression
    -5595                    child_node.arg_key = k
    -5596            else:
    -5597                new_child_nodes.append(cn)
    -5598
    -5599        expression.args[k] = new_child_nodes if is_list_arg else seq_get(new_child_nodes, 0)
    +            
    5639def replace_children(expression: Expression, fun: t.Callable, *args, **kwargs) -> None:
    +5640    """
    +5641    Replace children of an expression with the result of a lambda fun(child) -> exp.
    +5642    """
    +5643    for k, v in expression.args.items():
    +5644        is_list_arg = type(v) is list
    +5645
    +5646        child_nodes = v if is_list_arg else [v]
    +5647        new_child_nodes = []
    +5648
    +5649        for cn in child_nodes:
    +5650            if isinstance(cn, Expression):
    +5651                for child_node in ensure_collection(fun(cn, *args, **kwargs)):
    +5652                    new_child_nodes.append(child_node)
    +5653                    child_node.parent = expression
    +5654                    child_node.arg_key = k
    +5655            else:
    +5656                new_child_nodes.append(cn)
    +5657
    +5658        expression.args[k] = new_child_nodes if is_list_arg else seq_get(new_child_nodes, 0)
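An illustrative use of `replace_children` that rewrites only the direct Column children of a SELECT; wrapping them in UPPER() is an arbitrary choice for the example:

    from sqlglot import exp, parse_one

    select = parse_one("SELECT a, b FROM t")

    def upper_columns(node):
        # Wrap direct Column children in UPPER(); leave everything else (e.g. the FROM clause) untouched.
        return exp.func("upper", node) if isinstance(node, exp.Column) else node

    exp.replace_children(select, upper_columns)
    print(select.sql())   # SELECT UPPER(a), UPPER(b) FROM t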
     
    @@ -51159,22 +51835,22 @@ If either are provided then an alias is also required.
    -
    5602def column_table_names(expression: Expression) -> t.List[str]:
    -5603    """
    -5604    Return all table names referenced through columns in an expression.
    -5605
    -5606    Example:
    -5607        >>> import sqlglot
    -5608        >>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e"))
    -5609        ['c', 'a']
    -5610
    -5611    Args:
    -5612        expression: expression to find table names.
    -5613
    -5614    Returns:
    -5615        A list of unique names.
    -5616    """
    -5617    return list(dict.fromkeys(column.table for column in expression.find_all(Column)))
    +            
    5661def column_table_names(expression: Expression) -> t.List[str]:
    +5662    """
    +5663    Return all table names referenced through columns in an expression.
    +5664
    +5665    Example:
    +5666        >>> import sqlglot
    +5667        >>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e"))
    +5668        ['c', 'a']
    +5669
    +5670    Args:
    +5671        expression: expression to find table names.
    +5672
    +5673    Returns:
    +5674        A list of unique names.
    +5675    """
    +5676    return list(dict.fromkeys(column.table for column in expression.find_all(Column)))
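A small `column_table_names` sketch; sorting makes the printed result independent of traversal order:

    from sqlglot import exp, parse_one

    cond = parse_one("orders.total > 100 AND customers.id = orders.customer_id")
    print(sorted(exp.column_table_names(cond)))   # ['customers', 'orders']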
     
    @@ -51217,27 +51893,27 @@ If either are provided then an alias is also required.
    -
    5620def table_name(table: Table | str) -> str:
    -5621    """Get the full name of a table as a string.
    -5622
    -5623    Args:
    -5624        table: table expression node or string.
    -5625
    -5626    Examples:
    -5627        >>> from sqlglot import exp, parse_one
    -5628        >>> table_name(parse_one("select * from a.b.c").find(exp.Table))
    -5629        'a.b.c'
    -5630
    -5631    Returns:
    -5632        The table name.
    -5633    """
    -5634
    -5635    table = maybe_parse(table, into=Table)
    -5636
    -5637    if not table:
    -5638        raise ValueError(f"Cannot parse {table}")
    -5639
    -5640    return ".".join(part for part in (table.text("catalog"), table.text("db"), table.name) if part)
    +            
    5679def table_name(table: Table | str) -> str:
    +5680    """Get the full name of a table as a string.
    +5681
    +5682    Args:
    +5683        table: table expression node or string.
    +5684
    +5685    Examples:
    +5686        >>> from sqlglot import exp, parse_one
    +5687        >>> table_name(parse_one("select * from a.b.c").find(exp.Table))
    +5688        'a.b.c'
    +5689
    +5690    Returns:
    +5691        The table name.
    +5692    """
    +5693
    +5694    table = maybe_parse(table, into=Table)
    +5695
    +5696    if not table:
    +5697        raise ValueError(f"Cannot parse {table}")
    +5698
    +5699    return ".".join(part for part in (table.text("catalog"), table.text("db"), table.name) if part)
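`table_name` also accepts the output of the `table_` builder as well as plain strings, as in this sketch:

    from sqlglot import exp

    print(exp.table_name(exp.table_("c", db="b", catalog="a")))   # a.b.c
    print(exp.table_name("db.tbl"))                               # db.tbl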
     
    @@ -51274,39 +51950,40 @@ If either are provided then an alias is also required.
    def
    - replace_tables(expression: ~E, mapping: Dict[str, str]) -> ~E:
    + replace_tables(expression: ~E, mapping: Dict[str, str], copy: bool = True) -> ~E:
    -
    5643def replace_tables(expression: E, mapping: t.Dict[str, str]) -> E:
    -5644    """Replace all tables in expression according to the mapping.
    -5645
    -5646    Args:
    -5647        expression: expression node to be transformed and replaced.
    -5648        mapping: mapping of table names.
    -5649
    -5650    Examples:
    -5651        >>> from sqlglot import exp, parse_one
    -5652        >>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql()
    -5653        'SELECT * FROM c'
    -5654
    -5655    Returns:
    -5656        The mapped expression.
    -5657    """
    -5658
    -5659    def _replace_tables(node: Expression) -> Expression:
    -5660        if isinstance(node, Table):
    -5661            new_name = mapping.get(table_name(node))
    -5662            if new_name:
    -5663                return to_table(
    -5664                    new_name,
    -5665                    **{k: v for k, v in node.args.items() if k not in ("this", "db", "catalog")},
    -5666                )
    -5667        return node
    -5668
    -5669    return expression.transform(_replace_tables)
    +            
    5702def replace_tables(expression: E, mapping: t.Dict[str, str], copy: bool = True) -> E:
    +5703    """Replace all tables in expression according to the mapping.
    +5704
    +5705    Args:
    +5706        expression: expression node to be transformed and replaced.
    +5707        mapping: mapping of table names.
    +5708        copy: whether or not to copy the expression.
    +5709
    +5710    Examples:
    +5711        >>> from sqlglot import exp, parse_one
    +5712        >>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql()
    +5713        'SELECT * FROM c'
    +5714
    +5715    Returns:
    +5716        The mapped expression.
    +5717    """
    +5718
    +5719    def _replace_tables(node: Expression) -> Expression:
    +5720        if isinstance(node, Table):
    +5721            new_name = mapping.get(table_name(node))
    +5722            if new_name:
    +5723                return to_table(
    +5724                    new_name,
    +5725                    **{k: v for k, v in node.args.items() if k not in ("this", "db", "catalog")},
    +5726                )
    +5727        return node
    +5728
    +5729    return expression.transform(_replace_tables, copy=copy)
     
    @@ -51317,6 +51994,7 @@ If either are provided then an alias is also required.
    • expression: expression node to be transformed and replaced.
    • mapping: mapping of table names.
    + • copy: whether or not to copy the expression.
    Examples:
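A sketch of `replace_tables` with the new `copy` flag left at its default, so the input tree is not mutated; the schema names are invented for the example:

    from sqlglot import exp, parse_one

    q = parse_one("SELECT * FROM prod.users JOIN prod.orders ON users.id = orders.user_id")
    mapped = exp.replace_tables(q, {"prod.users": "staging.users", "prod.orders": "staging.orders"})
    print(mapped.sql())
    # SELECT * FROM staging.users JOIN staging.orders ON users.id = orders.user_id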
    @@ -51350,40 +52028,40 @@ If either are provided then an alias is also required.
    -
    5672def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression:
    -5673    """Replace placeholders in an expression.
    -5674
    -5675    Args:
    -5676        expression: expression node to be transformed and replaced.
    -5677        args: positional names that will substitute unnamed placeholders in the given order.
    -5678        kwargs: keyword arguments that will substitute named placeholders.
    -5679
    -5680    Examples:
    -5681        >>> from sqlglot import exp, parse_one
    -5682        >>> replace_placeholders(
    -5683        ...     parse_one("select * from :tbl where ? = ?"),
    -5684        ...     exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo")
    -5685        ... ).sql()
    -5686        "SELECT * FROM foo WHERE str_col = 'b'"
    -5687
    -5688    Returns:
    -5689        The mapped expression.
    -5690    """
    -5691
    -5692    def _replace_placeholders(node: Expression, args, **kwargs) -> Expression:
    -5693        if isinstance(node, Placeholder):
    -5694            if node.name:
    -5695                new_name = kwargs.get(node.name)
    -5696                if new_name:
    -5697                    return convert(new_name)
    -5698            else:
    -5699                try:
    -5700                    return convert(next(args))
    -5701                except StopIteration:
    -5702                    pass
    -5703        return node
    -5704
    -5705    return expression.transform(_replace_placeholders, iter(args), **kwargs)
    +            
    5732def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression:
    +5733    """Replace placeholders in an expression.
    +5734
    +5735    Args:
    +5736        expression: expression node to be transformed and replaced.
    +5737        args: positional names that will substitute unnamed placeholders in the given order.
    +5738        kwargs: keyword arguments that will substitute named placeholders.
    +5739
    +5740    Examples:
    +5741        >>> from sqlglot import exp, parse_one
    +5742        >>> replace_placeholders(
    +5743        ...     parse_one("select * from :tbl where ? = ?"),
    +5744        ...     exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo")
    +5745        ... ).sql()
    +5746        "SELECT * FROM foo WHERE str_col = 'b'"
    +5747
    +5748    Returns:
    +5749        The mapped expression.
    +5750    """
    +5751
    +5752    def _replace_placeholders(node: Expression, args, **kwargs) -> Expression:
    +5753        if isinstance(node, Placeholder):
    +5754            if node.name:
    +5755                new_name = kwargs.get(node.name)
    +5756                if new_name:
    +5757                    return convert(new_name)
    +5758            else:
    +5759                try:
    +5760                    return convert(next(args))
    +5761                except StopIteration:
    +5762                    pass
    +5763        return node
    +5764
    +5765    return expression.transform(_replace_placeholders, iter(args), **kwargs)
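A sketch mixing positional and named placeholders (the substituted values are arbitrary):

    from sqlglot import exp, parse_one

    stmt = parse_one("SELECT * FROM :tbl WHERE status = ? AND amount > ?")
    print(exp.replace_placeholders(stmt, "active", 100, tbl=exp.to_identifier("payments")).sql())
    # SELECT * FROM payments WHERE status = 'active' AND amount > 100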
     
    @@ -51431,39 +52109,39 @@ If either are provided then an alias is also required.
    -
    5708def expand(
    -5709    expression: Expression, sources: t.Dict[str, Subqueryable], copy: bool = True
    -5710) -> Expression:
    -5711    """Transforms an expression by expanding all referenced sources into subqueries.
    -5712
    -5713    Examples:
    -5714        >>> from sqlglot import parse_one
    -5715        >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()
    -5716        'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'
    -5717
    -5718        >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql()
    -5719        'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */'
    -5720
    -5721    Args:
    -5722        expression: The expression to expand.
    -5723        sources: A dictionary of name to Subqueryables.
    -5724        copy: Whether or not to copy the expression during transformation. Defaults to True.
    -5725
    -5726    Returns:
    -5727        The transformed expression.
    -5728    """
    -5729
    -5730    def _expand(node: Expression):
    -5731        if isinstance(node, Table):
    -5732            name = table_name(node)
    -5733            source = sources.get(name)
    -5734            if source:
    -5735                subquery = source.subquery(node.alias or name)
    -5736                subquery.comments = [f"source: {name}"]
    -5737                return subquery.transform(_expand, copy=False)
    -5738        return node
    -5739
    -5740    return expression.transform(_expand, copy=copy)
    +            
    5768def expand(
    +5769    expression: Expression, sources: t.Dict[str, Subqueryable], copy: bool = True
    +5770) -> Expression:
    +5771    """Transforms an expression by expanding all referenced sources into subqueries.
    +5772
    +5773    Examples:
    +5774        >>> from sqlglot import parse_one
    +5775        >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()
    +5776        'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'
    +5777
    +5778        >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql()
    +5779        'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */'
    +5780
    +5781    Args:
    +5782        expression: The expression to expand.
    +5783        sources: A dictionary of name to Subqueryables.
    +5784        copy: Whether or not to copy the expression during transformation. Defaults to True.
    +5785
    +5786    Returns:
    +5787        The transformed expression.
    +5788    """
    +5789
    +5790    def _expand(node: Expression):
    +5791        if isinstance(node, Table):
    +5792            name = table_name(node)
    +5793            source = sources.get(name)
    +5794            if source:
    +5795                subquery = source.subquery(node.alias or name)
    +5796                subquery.comments = [f"source: {name}"]
    +5797                return subquery.transform(_expand, copy=False)
    +5798        return node
    +5799
    +5800    return expression.transform(_expand, copy=copy)
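A minimal `expand` sketch with a single inlined source (names are made up):

    from sqlglot import exp, parse_one

    sources = {"active_users": parse_one("SELECT * FROM users WHERE active = 1")}
    print(exp.expand(parse_one("SELECT id FROM active_users"), sources).sql())
    # SELECT id FROM (SELECT * FROM users WHERE active = 1) AS active_users /* source: active_users */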
     
    @@ -51514,51 +52192,51 @@ If either are provided then an alias is also required.
    -
    5743def func(name: str, *args, dialect: DialectType = None, **kwargs) -> Func:
    -5744    """
    -5745    Returns a Func expression.
    -5746
    -5747    Examples:
    -5748        >>> func("abs", 5).sql()
    -5749        'ABS(5)'
    -5750
    -5751        >>> func("cast", this=5, to=DataType.build("DOUBLE")).sql()
    -5752        'CAST(5 AS DOUBLE)'
    -5753
    -5754    Args:
    -5755        name: the name of the function to build.
    -5756        args: the args used to instantiate the function of interest.
    -5757        dialect: the source dialect.
    -5758        kwargs: the kwargs used to instantiate the function of interest.
    -5759
    -5760    Note:
    -5761        The arguments `args` and `kwargs` are mutually exclusive.
    -5762
    -5763    Returns:
    -5764        An instance of the function of interest, or an anonymous function, if `name` doesn't
    -5765        correspond to an existing `sqlglot.expressions.Func` class.
    -5766    """
    -5767    if args and kwargs:
    -5768        raise ValueError("Can't use both args and kwargs to instantiate a function.")
    -5769
    -5770    from sqlglot.dialects.dialect import Dialect
    -5771
    -5772    converted: t.List[Expression] = [maybe_parse(arg, dialect=dialect) for arg in args]
    -5773    kwargs = {key: maybe_parse(value, dialect=dialect) for key, value in kwargs.items()}
    -5774
    -5775    parser = Dialect.get_or_raise(dialect)().parser()
    -5776    from_args_list = parser.FUNCTIONS.get(name.upper())
    -5777
    -5778    if from_args_list:
    -5779        function = from_args_list(converted) if converted else from_args_list.__self__(**kwargs)  # type: ignore
    -5780    else:
    -5781        kwargs = kwargs or {"expressions": converted}
    -5782        function = Anonymous(this=name, **kwargs)
    -5783
    -5784    for error_message in function.error_messages(converted):
    -5785        raise ValueError(error_message)
    -5786
    -5787    return function
    +            
    5803def func(name: str, *args, dialect: DialectType = None, **kwargs) -> Func:
    +5804    """
    +5805    Returns a Func expression.
    +5806
    +5807    Examples:
    +5808        >>> func("abs", 5).sql()
    +5809        'ABS(5)'
    +5810
    +5811        >>> func("cast", this=5, to=DataType.build("DOUBLE")).sql()
    +5812        'CAST(5 AS DOUBLE)'
    +5813
    +5814    Args:
    +5815        name: the name of the function to build.
    +5816        args: the args used to instantiate the function of interest.
    +5817        dialect: the source dialect.
    +5818        kwargs: the kwargs used to instantiate the function of interest.
    +5819
    +5820    Note:
    +5821        The arguments `args` and `kwargs` are mutually exclusive.
    +5822
    +5823    Returns:
    +5824        An instance of the function of interest, or an anonymous function, if `name` doesn't
    +5825        correspond to an existing `sqlglot.expressions.Func` class.
    +5826    """
    +5827    if args and kwargs:
    +5828        raise ValueError("Can't use both args and kwargs to instantiate a function.")
    +5829
    +5830    from sqlglot.dialects.dialect import Dialect
    +5831
    +5832    converted: t.List[Expression] = [maybe_parse(arg, dialect=dialect) for arg in args]
    +5833    kwargs = {key: maybe_parse(value, dialect=dialect) for key, value in kwargs.items()}
    +5834
    +5835    parser = Dialect.get_or_raise(dialect)().parser()
    +5836    from_args_list = parser.FUNCTIONS.get(name.upper())
    +5837
    +5838    if from_args_list:
    +5839        function = from_args_list(converted) if converted else from_args_list.__self__(**kwargs)  # type: ignore
    +5840    else:
    +5841        kwargs = kwargs or {"expressions": converted}
    +5842        function = Anonymous(this=name, **kwargs)
    +5843
    +5844    for error_message in function.error_messages(converted):
    +5845        raise ValueError(error_message)
    +5846
    +5847    return function
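Two `func` sketches, one resolving to a known Func class by name and one using the kwargs form from the docstring:

    from sqlglot import exp

    print(exp.func("lower", exp.column("name")).sql())                      # LOWER(name)
    print(exp.func("cast", this=5, to=exp.DataType.build("DOUBLE")).sql())  # CAST(5 AS DOUBLE)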
     
    @@ -51616,11 +52294,11 @@ If either are provided then an alias is also required.
    -
    5790def true() -> Boolean:
    -5791    """
    -5792    Returns a true Boolean expression.
    -5793    """
    -5794    return Boolean(this=True)
    +            
    5850def true() -> Boolean:
    +5851    """
    +5852    Returns a true Boolean expression.
    +5853    """
    +5854    return Boolean(this=True)
     
    @@ -51640,11 +52318,11 @@ If either are provided then an alias is also required.
    -
    5797def false() -> Boolean:
    -5798    """
    -5799    Returns a false Boolean expression.
    -5800    """
    -5801    return Boolean(this=False)
    +            
    5857def false() -> Boolean:
    +5858    """
    +5859    Returns a false Boolean expression.
    +5860    """
    +5861    return Boolean(this=False)
     
    @@ -51664,11 +52342,11 @@ If either are provided then an alias is also required.
    -
    5804def null() -> Null:
    -5805    """
    -5806    Returns a Null expression.
    -5807    """
    -5808    return Null()
    +            
    5864def null() -> Null:
    +5865    """
    +5866    Returns a Null expression.
    +5867    """
    +5868    return Null()
     
diff --git a/docs/sqlglot/generator.html b/docs/sqlglot/generator.html
index 4de314d..3ca0cff 100644
--- a/docs/sqlglot/generator.html
+++ b/docs/sqlglot/generator.html
@@ -108,6 +108,9 @@
  • uniquecolumnconstraint_sql
+ • createable_sql
  • create_sql
@@ -354,6 +357,9 @@
  • query_modifiers
+ • offset_limit_modifiers
  • after_having_modifiers
@@ -366,6 +372,9 @@
  • schema_sql
+ • schema_columns_sql
  • star_sql
@@ -439,7 +448,7 @@
  • trim_sql
- • concat_sql
+ • safeconcat_sql
  • check_sql
@@ -591,6 +600,9 @@
  • dpipe_sql
+ • safedpipe_sql
  • div_sql
@@ -735,6 +747,9 @@
  • dictsubproperty_sql
+ • oncluster_sql
@@ -780,1791 +795,1791 @@
    14 15class Generator: 16 """ - 17 Generator interprets the given syntax tree and produces a SQL string as an output. + 17 Generator converts a given syntax tree to the corresponding SQL string. 18 19 Args: - 20 time_mapping (dict): the dictionary of custom time mappings in which the key - 21 represents a python time format and the output the target time format - 22 time_trie (trie): a trie of the time_mapping keys - 23 pretty (bool): if set to True the returned string will be formatted. Default: False. - 24 quote_start (str): specifies which starting character to use to delimit quotes. Default: '. - 25 quote_end (str): specifies which ending character to use to delimit quotes. Default: '. - 26 identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ". - 27 identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ". - 28 bit_start (str): specifies which starting character to use to delimit bit literals. Default: None. - 29 bit_end (str): specifies which ending character to use to delimit bit literals. Default: None. - 30 hex_start (str): specifies which starting character to use to delimit hex literals. Default: None. - 31 hex_end (str): specifies which ending character to use to delimit hex literals. Default: None. - 32 byte_start (str): specifies which starting character to use to delimit byte literals. Default: None. - 33 byte_end (str): specifies which ending character to use to delimit byte literals. Default: None. - 34 raw_start (str): specifies which starting character to use to delimit raw literals. Default: None. - 35 raw_end (str): specifies which ending character to use to delimit raw literals. Default: None. - 36 identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always. - 37 normalize (bool): if set to True all identifiers will lower cased - 38 string_escape (str): specifies a string escape character. Default: '. - 39 identifier_escape (str): specifies an identifier escape character. Default: ". - 40 pad (int): determines padding in a formatted string. Default: 2. - 41 indent (int): determines the size of indentation in a formatted string. Default: 4. - 42 unnest_column_only (bool): if true unnest table aliases are considered only as column aliases - 43 normalize_functions (str): normalize function names, "upper", "lower", or None - 44 Default: "upper" - 45 alias_post_tablesample (bool): if the table alias comes after tablesample - 46 Default: False - 47 identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit - 48 Default: False - 49 unsupported_level (ErrorLevel): determines the generator's behavior when it encounters - 50 unsupported expressions. Default ErrorLevel.WARN. - 51 null_ordering (str): Indicates the default null ordering method to use if not explicitly set. - 52 Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". - 53 Default: "nulls_are_small" - 54 max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. - 55 This is only relevant if unsupported_level is ErrorLevel.RAISE. - 56 Default: 3 - 57 leading_comma (bool): if the the comma is leading or trailing in select statements - 58 Default: False - 59 max_text_width: The max number of characters in a segment before creating new lines in pretty mode. - 60 The default is on the smaller end because the length only represents a segment and not the true - 61 line length. 
- 62 Default: 80 - 63 comments: Whether or not to preserve comments in the output SQL code. - 64 Default: True - 65 """ - 66 - 67 TRANSFORMS = { - 68 exp.DateAdd: lambda self, e: self.func( - 69 "DATE_ADD", e.this, e.expression, exp.Literal.string(e.text("unit")) - 70 ), - 71 exp.TsOrDsAdd: lambda self, e: self.func( - 72 "TS_OR_DS_ADD", e.this, e.expression, exp.Literal.string(e.text("unit")) - 73 ), - 74 exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]), - 75 exp.CharacterSetProperty: lambda self, e: f"{'DEFAULT ' if e.args.get('default') else ''}CHARACTER SET={self.sql(e, 'this')}", - 76 exp.ExecuteAsProperty: lambda self, e: self.naked_property(e), - 77 exp.ExternalProperty: lambda self, e: "EXTERNAL", - 78 exp.LanguageProperty: lambda self, e: self.naked_property(e), - 79 exp.LocationProperty: lambda self, e: self.naked_property(e), - 80 exp.LogProperty: lambda self, e: f"{'NO ' if e.args.get('no') else ''}LOG", - 81 exp.MaterializedProperty: lambda self, e: "MATERIALIZED", - 82 exp.NoPrimaryIndexProperty: lambda self, e: "NO PRIMARY INDEX", - 83 exp.OnCommitProperty: lambda self, e: f"ON COMMIT {'DELETE' if e.args.get('delete') else 'PRESERVE'} ROWS", - 84 exp.ReturnsProperty: lambda self, e: self.naked_property(e), - 85 exp.SetProperty: lambda self, e: f"{'MULTI' if e.args.get('multi') else ''}SET", - 86 exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}", - 87 exp.SqlSecurityProperty: lambda self, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}", - 88 exp.TemporaryProperty: lambda self, e: f"TEMPORARY", - 89 exp.TransientProperty: lambda self, e: "TRANSIENT", - 90 exp.StabilityProperty: lambda self, e: e.name, - 91 exp.VolatileProperty: lambda self, e: "VOLATILE", - 92 exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}", - 93 exp.CaseSpecificColumnConstraint: lambda self, e: f"{'NOT ' if e.args.get('not_') else ''}CASESPECIFIC", - 94 exp.CharacterSetColumnConstraint: lambda self, e: f"CHARACTER SET {self.sql(e, 'this')}", - 95 exp.DateFormatColumnConstraint: lambda self, e: f"FORMAT {self.sql(e, 'this')}", - 96 exp.OnUpdateColumnConstraint: lambda self, e: f"ON UPDATE {self.sql(e, 'this')}", - 97 exp.UppercaseColumnConstraint: lambda self, e: f"UPPERCASE", - 98 exp.TitleColumnConstraint: lambda self, e: f"TITLE {self.sql(e, 'this')}", - 99 exp.PathColumnConstraint: lambda self, e: f"PATH {self.sql(e, 'this')}", - 100 exp.CheckColumnConstraint: lambda self, e: f"CHECK ({self.sql(e, 'this')})", - 101 exp.CommentColumnConstraint: lambda self, e: f"COMMENT {self.sql(e, 'this')}", - 102 exp.CollateColumnConstraint: lambda self, e: f"COLLATE {self.sql(e, 'this')}", - 103 exp.EncodeColumnConstraint: lambda self, e: f"ENCODE {self.sql(e, 'this')}", - 104 exp.DefaultColumnConstraint: lambda self, e: f"DEFAULT {self.sql(e, 'this')}", - 105 exp.InlineLengthColumnConstraint: lambda self, e: f"INLINE LENGTH {self.sql(e, 'this')}", - 106 } - 107 - 108 # Whether or not null ordering is supported in order by - 109 NULL_ORDERING_SUPPORTED = True - 110 - 111 # Whether or not locking reads (i.e. SELECT ... 
FOR UPDATE/SHARE) are supported - 112 LOCKING_READS_SUPPORTED = False - 113 - 114 # Always do union distinct or union all - 115 EXPLICIT_UNION = False - 116 - 117 # Wrap derived values in parens, usually standard but spark doesn't support it - 118 WRAP_DERIVED_VALUES = True - 119 - 120 # Whether or not create function uses an AS before the RETURN - 121 CREATE_FUNCTION_RETURN_AS = True - 122 - 123 # Whether or not MERGE ... WHEN MATCHED BY SOURCE is allowed - 124 MATCHED_BY_SOURCE = True - 125 - 126 # Whether or not the INTERVAL expression works only with values like '1 day' - 127 SINGLE_STRING_INTERVAL = False - 128 - 129 # Whether or not the plural form of date parts like day (i.e. "days") is supported in INTERVALs - 130 INTERVAL_ALLOWS_PLURAL_FORM = True - 131 - 132 # Whether or not the TABLESAMPLE clause supports a method name, like BERNOULLI - 133 TABLESAMPLE_WITH_METHOD = True - 134 - 135 # Whether or not to treat the number in TABLESAMPLE (50) as a percentage - 136 TABLESAMPLE_SIZE_IS_PERCENT = False - 137 - 138 # Whether or not limit and fetch are supported (possible values: "ALL", "LIMIT", "FETCH") - 139 LIMIT_FETCH = "ALL" - 140 - 141 # Whether a table is allowed to be renamed with a db - 142 RENAME_TABLE_WITH_DB = True - 143 - 144 # The separator for grouping sets and rollups - 145 GROUPINGS_SEP = "," - 146 - 147 # The string used for creating index on a table - 148 INDEX_ON = "ON" - 149 - 150 TYPE_MAPPING = { - 151 exp.DataType.Type.NCHAR: "CHAR", - 152 exp.DataType.Type.NVARCHAR: "VARCHAR", - 153 exp.DataType.Type.MEDIUMTEXT: "TEXT", - 154 exp.DataType.Type.LONGTEXT: "TEXT", - 155 exp.DataType.Type.MEDIUMBLOB: "BLOB", - 156 exp.DataType.Type.LONGBLOB: "BLOB", - 157 exp.DataType.Type.INET: "INET", + 20 pretty: Whether or not to format the produced SQL string. + 21 Default: False. + 22 identify: Determines when an identifier should be quoted. Possible values are: + 23 False (default): Never quote, except in cases where it's mandatory by the dialect. + 24 True or 'always': Always quote. + 25 'safe': Only quote identifiers that are case insensitive. + 26 normalize: Whether or not to normalize identifiers to lowercase. + 27 Default: False. + 28 pad: Determines the pad size in a formatted string. + 29 Default: 2. + 30 indent: Determines the indentation size in a formatted string. + 31 Default: 2. + 32 normalize_functions: Whether or not to normalize all function names. Possible values are: + 33 "upper" or True (default): Convert names to uppercase. + 34 "lower": Convert names to lowercase. + 35 False: Disables function name normalization. + 36 unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. + 37 Default ErrorLevel.WARN. + 38 max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. + 39 This is only relevant if unsupported_level is ErrorLevel.RAISE. + 40 Default: 3 + 41 leading_comma: Determines whether or not the comma is leading or trailing in select expressions. + 42 This is only relevant when generating in pretty mode. + 43 Default: False + 44 max_text_width: The max number of characters in a segment before creating new lines in pretty mode. + 45 The default is on the smaller end because the length only represents a segment and not the true + 46 line length. + 47 Default: 80 + 48 comments: Whether or not to preserve comments in the output SQL code. 
+ 49 Default: True + 50 """ + 51 + 52 TRANSFORMS = { + 53 exp.DateAdd: lambda self, e: self.func( + 54 "DATE_ADD", e.this, e.expression, exp.Literal.string(e.text("unit")) + 55 ), + 56 exp.TsOrDsAdd: lambda self, e: self.func( + 57 "TS_OR_DS_ADD", e.this, e.expression, exp.Literal.string(e.text("unit")) + 58 ), + 59 exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]), + 60 exp.CharacterSetProperty: lambda self, e: f"{'DEFAULT ' if e.args.get('default') else ''}CHARACTER SET={self.sql(e, 'this')}", + 61 exp.ExecuteAsProperty: lambda self, e: self.naked_property(e), + 62 exp.ExternalProperty: lambda self, e: "EXTERNAL", + 63 exp.LanguageProperty: lambda self, e: self.naked_property(e), + 64 exp.LocationProperty: lambda self, e: self.naked_property(e), + 65 exp.LogProperty: lambda self, e: f"{'NO ' if e.args.get('no') else ''}LOG", + 66 exp.MaterializedProperty: lambda self, e: "MATERIALIZED", + 67 exp.NoPrimaryIndexProperty: lambda self, e: "NO PRIMARY INDEX", + 68 exp.OnCommitProperty: lambda self, e: f"ON COMMIT {'DELETE' if e.args.get('delete') else 'PRESERVE'} ROWS", + 69 exp.ReturnsProperty: lambda self, e: self.naked_property(e), + 70 exp.SetProperty: lambda self, e: f"{'MULTI' if e.args.get('multi') else ''}SET", + 71 exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}", + 72 exp.SqlSecurityProperty: lambda self, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}", + 73 exp.TemporaryProperty: lambda self, e: f"TEMPORARY", + 74 exp.ToTableProperty: lambda self, e: f"TO {self.sql(e.this)}", + 75 exp.TransientProperty: lambda self, e: "TRANSIENT", + 76 exp.StabilityProperty: lambda self, e: e.name, + 77 exp.VolatileProperty: lambda self, e: "VOLATILE", + 78 exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}", + 79 exp.CaseSpecificColumnConstraint: lambda self, e: f"{'NOT ' if e.args.get('not_') else ''}CASESPECIFIC", + 80 exp.CharacterSetColumnConstraint: lambda self, e: f"CHARACTER SET {self.sql(e, 'this')}", + 81 exp.DateFormatColumnConstraint: lambda self, e: f"FORMAT {self.sql(e, 'this')}", + 82 exp.OnUpdateColumnConstraint: lambda self, e: f"ON UPDATE {self.sql(e, 'this')}", + 83 exp.UppercaseColumnConstraint: lambda self, e: f"UPPERCASE", + 84 exp.TitleColumnConstraint: lambda self, e: f"TITLE {self.sql(e, 'this')}", + 85 exp.PathColumnConstraint: lambda self, e: f"PATH {self.sql(e, 'this')}", + 86 exp.CheckColumnConstraint: lambda self, e: f"CHECK ({self.sql(e, 'this')})", + 87 exp.CommentColumnConstraint: lambda self, e: f"COMMENT {self.sql(e, 'this')}", + 88 exp.CollateColumnConstraint: lambda self, e: f"COLLATE {self.sql(e, 'this')}", + 89 exp.EncodeColumnConstraint: lambda self, e: f"ENCODE {self.sql(e, 'this')}", + 90 exp.DefaultColumnConstraint: lambda self, e: f"DEFAULT {self.sql(e, 'this')}", + 91 exp.InlineLengthColumnConstraint: lambda self, e: f"INLINE LENGTH {self.sql(e, 'this')}", + 92 } + 93 + 94 # Whether or not null ordering is supported in order by + 95 NULL_ORDERING_SUPPORTED = True + 96 + 97 # Whether or not locking reads (i.e. SELECT ... 
FOR UPDATE/SHARE) are supported + 98 LOCKING_READS_SUPPORTED = False + 99 + 100 # Always do union distinct or union all + 101 EXPLICIT_UNION = False + 102 + 103 # Wrap derived values in parens, usually standard but spark doesn't support it + 104 WRAP_DERIVED_VALUES = True + 105 + 106 # Whether or not create function uses an AS before the RETURN + 107 CREATE_FUNCTION_RETURN_AS = True + 108 + 109 # Whether or not MERGE ... WHEN MATCHED BY SOURCE is allowed + 110 MATCHED_BY_SOURCE = True + 111 + 112 # Whether or not the INTERVAL expression works only with values like '1 day' + 113 SINGLE_STRING_INTERVAL = False + 114 + 115 # Whether or not the plural form of date parts like day (i.e. "days") is supported in INTERVALs + 116 INTERVAL_ALLOWS_PLURAL_FORM = True + 117 + 118 # Whether or not the TABLESAMPLE clause supports a method name, like BERNOULLI + 119 TABLESAMPLE_WITH_METHOD = True + 120 + 121 # Whether or not to treat the number in TABLESAMPLE (50) as a percentage + 122 TABLESAMPLE_SIZE_IS_PERCENT = False + 123 + 124 # Whether or not limit and fetch are supported (possible values: "ALL", "LIMIT", "FETCH") + 125 LIMIT_FETCH = "ALL" + 126 + 127 # Whether or not a table is allowed to be renamed with a db + 128 RENAME_TABLE_WITH_DB = True + 129 + 130 # The separator for grouping sets and rollups + 131 GROUPINGS_SEP = "," + 132 + 133 # The string used for creating an index on a table + 134 INDEX_ON = "ON" + 135 + 136 # Whether or not join hints should be generated + 137 JOIN_HINTS = True + 138 + 139 # Whether or not table hints should be generated + 140 TABLE_HINTS = True + 141 + 142 # Whether or not comparing against booleans (e.g. x IS TRUE) is supported + 143 IS_BOOL_ALLOWED = True + 144 + 145 TYPE_MAPPING = { + 146 exp.DataType.Type.NCHAR: "CHAR", + 147 exp.DataType.Type.NVARCHAR: "VARCHAR", + 148 exp.DataType.Type.MEDIUMTEXT: "TEXT", + 149 exp.DataType.Type.LONGTEXT: "TEXT", + 150 exp.DataType.Type.MEDIUMBLOB: "BLOB", + 151 exp.DataType.Type.LONGBLOB: "BLOB", + 152 exp.DataType.Type.INET: "INET", + 153 } + 154 + 155 STAR_MAPPING = { + 156 "except": "EXCEPT", + 157 "replace": "REPLACE", 158 } 159 - 160 STAR_MAPPING = { - 161 "except": "EXCEPT", - 162 "replace": "REPLACE", - 163 } - 164 - 165 TIME_PART_SINGULARS = { - 166 "microseconds": "microsecond", - 167 "seconds": "second", - 168 "minutes": "minute", - 169 "hours": "hour", - 170 "days": "day", - 171 "weeks": "week", - 172 "months": "month", - 173 "quarters": "quarter", - 174 "years": "year", - 175 } - 176 - 177 TOKEN_MAPPING: t.Dict[TokenType, str] = {} - 178 - 179 STRUCT_DELIMITER = ("<", ">") - 180 - 181 PARAMETER_TOKEN = "@" - 182 - 183 PROPERTIES_LOCATION = { - 184 exp.AlgorithmProperty: exp.Properties.Location.POST_CREATE, - 185 exp.AutoIncrementProperty: exp.Properties.Location.POST_SCHEMA, - 186 exp.BlockCompressionProperty: exp.Properties.Location.POST_NAME, - 187 exp.CharacterSetProperty: exp.Properties.Location.POST_SCHEMA, - 188 exp.ChecksumProperty: exp.Properties.Location.POST_NAME, - 189 exp.CollateProperty: exp.Properties.Location.POST_SCHEMA, - 190 exp.Cluster: exp.Properties.Location.POST_SCHEMA, - 191 exp.DataBlocksizeProperty: exp.Properties.Location.POST_NAME, - 192 exp.DefinerProperty: exp.Properties.Location.POST_CREATE, - 193 exp.DictRange: exp.Properties.Location.POST_SCHEMA, - 194 exp.DictProperty: exp.Properties.Location.POST_SCHEMA, - 195 exp.DistKeyProperty: exp.Properties.Location.POST_SCHEMA, - 196 exp.DistStyleProperty: exp.Properties.Location.POST_SCHEMA, - 197 exp.EngineProperty: 
exp.Properties.Location.POST_SCHEMA, - 198 exp.ExecuteAsProperty: exp.Properties.Location.POST_SCHEMA, - 199 exp.ExternalProperty: exp.Properties.Location.POST_CREATE, - 200 exp.FallbackProperty: exp.Properties.Location.POST_NAME, - 201 exp.FileFormatProperty: exp.Properties.Location.POST_WITH, - 202 exp.FreespaceProperty: exp.Properties.Location.POST_NAME, - 203 exp.IsolatedLoadingProperty: exp.Properties.Location.POST_NAME, - 204 exp.JournalProperty: exp.Properties.Location.POST_NAME, - 205 exp.LanguageProperty: exp.Properties.Location.POST_SCHEMA, - 206 exp.LikeProperty: exp.Properties.Location.POST_SCHEMA, - 207 exp.LocationProperty: exp.Properties.Location.POST_SCHEMA, - 208 exp.LockingProperty: exp.Properties.Location.POST_ALIAS, - 209 exp.LogProperty: exp.Properties.Location.POST_NAME, - 210 exp.MaterializedProperty: exp.Properties.Location.POST_CREATE, - 211 exp.MergeBlockRatioProperty: exp.Properties.Location.POST_NAME, - 212 exp.NoPrimaryIndexProperty: exp.Properties.Location.POST_EXPRESSION, - 213 exp.OnCommitProperty: exp.Properties.Location.POST_EXPRESSION, - 214 exp.Order: exp.Properties.Location.POST_SCHEMA, - 215 exp.PartitionedByProperty: exp.Properties.Location.POST_WITH, - 216 exp.PrimaryKey: exp.Properties.Location.POST_SCHEMA, - 217 exp.Property: exp.Properties.Location.POST_WITH, - 218 exp.ReturnsProperty: exp.Properties.Location.POST_SCHEMA, - 219 exp.RowFormatProperty: exp.Properties.Location.POST_SCHEMA, - 220 exp.RowFormatDelimitedProperty: exp.Properties.Location.POST_SCHEMA, - 221 exp.RowFormatSerdeProperty: exp.Properties.Location.POST_SCHEMA, - 222 exp.SchemaCommentProperty: exp.Properties.Location.POST_SCHEMA, - 223 exp.SerdeProperties: exp.Properties.Location.POST_SCHEMA, - 224 exp.Set: exp.Properties.Location.POST_SCHEMA, - 225 exp.SettingsProperty: exp.Properties.Location.POST_SCHEMA, - 226 exp.SetProperty: exp.Properties.Location.POST_CREATE, - 227 exp.SortKeyProperty: exp.Properties.Location.POST_SCHEMA, - 228 exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE, - 229 exp.StabilityProperty: exp.Properties.Location.POST_SCHEMA, - 230 exp.TemporaryProperty: exp.Properties.Location.POST_CREATE, - 231 exp.TransientProperty: exp.Properties.Location.POST_CREATE, - 232 exp.MergeTreeTTL: exp.Properties.Location.POST_SCHEMA, - 233 exp.VolatileProperty: exp.Properties.Location.POST_CREATE, - 234 exp.WithDataProperty: exp.Properties.Location.POST_EXPRESSION, - 235 exp.WithJournalTableProperty: exp.Properties.Location.POST_NAME, - 236 } - 237 - 238 JOIN_HINTS = True - 239 TABLE_HINTS = True - 240 - 241 RESERVED_KEYWORDS: t.Set[str] = set() - 242 WITH_SEPARATED_COMMENTS = (exp.Select, exp.From, exp.Where, exp.With) - 243 UNWRAPPED_INTERVAL_VALUES = (exp.Column, exp.Literal, exp.Neg, exp.Paren) + 160 TIME_PART_SINGULARS = { + 161 "microseconds": "microsecond", + 162 "seconds": "second", + 163 "minutes": "minute", + 164 "hours": "hour", + 165 "days": "day", + 166 "weeks": "week", + 167 "months": "month", + 168 "quarters": "quarter", + 169 "years": "year", + 170 } + 171 + 172 TOKEN_MAPPING: t.Dict[TokenType, str] = {} + 173 + 174 STRUCT_DELIMITER = ("<", ">") + 175 + 176 PARAMETER_TOKEN = "@" + 177 + 178 PROPERTIES_LOCATION = { + 179 exp.AlgorithmProperty: exp.Properties.Location.POST_CREATE, + 180 exp.AutoIncrementProperty: exp.Properties.Location.POST_SCHEMA, + 181 exp.BlockCompressionProperty: exp.Properties.Location.POST_NAME, + 182 exp.CharacterSetProperty: exp.Properties.Location.POST_SCHEMA, + 183 exp.ChecksumProperty: exp.Properties.Location.POST_NAME, 
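(For context on how the class-level knobs in this hunk are normally consumed: dialect generators subclass Generator and override TRANSFORMS, TYPE_MAPPING and the boolean capability flags at class level rather than patching instances. A minimal sketch only, not part of this patch; the class name, the MEDIUMTEXT mapping and the TRIM override below are purely illustrative.)

from sqlglot import exp, generator

class MyGenerator(generator.Generator):  # hypothetical example subclass
    LOCKING_READS_SUPPORTED = True  # illustrative flag override
    TYPE_MAPPING = {
        **generator.Generator.TYPE_MAPPING,
        exp.DataType.Type.MEDIUMTEXT: "STRING",  # illustrative type mapping
    }
    TRANSFORMS = {
        **generator.Generator.TRANSFORMS,
        # Render TRIM as a plain function call; self.func joins the SQL of its args.
        exp.Trim: lambda self, e: self.func("TRIM", e.this),
    }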
+ 184 exp.CollateProperty: exp.Properties.Location.POST_SCHEMA, + 185 exp.Cluster: exp.Properties.Location.POST_SCHEMA, + 186 exp.DataBlocksizeProperty: exp.Properties.Location.POST_NAME, + 187 exp.DefinerProperty: exp.Properties.Location.POST_CREATE, + 188 exp.DictRange: exp.Properties.Location.POST_SCHEMA, + 189 exp.DictProperty: exp.Properties.Location.POST_SCHEMA, + 190 exp.DistKeyProperty: exp.Properties.Location.POST_SCHEMA, + 191 exp.DistStyleProperty: exp.Properties.Location.POST_SCHEMA, + 192 exp.EngineProperty: exp.Properties.Location.POST_SCHEMA, + 193 exp.ExecuteAsProperty: exp.Properties.Location.POST_SCHEMA, + 194 exp.ExternalProperty: exp.Properties.Location.POST_CREATE, + 195 exp.FallbackProperty: exp.Properties.Location.POST_NAME, + 196 exp.FileFormatProperty: exp.Properties.Location.POST_WITH, + 197 exp.FreespaceProperty: exp.Properties.Location.POST_NAME, + 198 exp.IsolatedLoadingProperty: exp.Properties.Location.POST_NAME, + 199 exp.JournalProperty: exp.Properties.Location.POST_NAME, + 200 exp.LanguageProperty: exp.Properties.Location.POST_SCHEMA, + 201 exp.LikeProperty: exp.Properties.Location.POST_SCHEMA, + 202 exp.LocationProperty: exp.Properties.Location.POST_SCHEMA, + 203 exp.LockingProperty: exp.Properties.Location.POST_ALIAS, + 204 exp.LogProperty: exp.Properties.Location.POST_NAME, + 205 exp.MaterializedProperty: exp.Properties.Location.POST_CREATE, + 206 exp.MergeBlockRatioProperty: exp.Properties.Location.POST_NAME, + 207 exp.NoPrimaryIndexProperty: exp.Properties.Location.POST_EXPRESSION, + 208 exp.OnCommitProperty: exp.Properties.Location.POST_EXPRESSION, + 209 exp.Order: exp.Properties.Location.POST_SCHEMA, + 210 exp.PartitionedByProperty: exp.Properties.Location.POST_WITH, + 211 exp.PrimaryKey: exp.Properties.Location.POST_SCHEMA, + 212 exp.Property: exp.Properties.Location.POST_WITH, + 213 exp.ReturnsProperty: exp.Properties.Location.POST_SCHEMA, + 214 exp.RowFormatProperty: exp.Properties.Location.POST_SCHEMA, + 215 exp.RowFormatDelimitedProperty: exp.Properties.Location.POST_SCHEMA, + 216 exp.RowFormatSerdeProperty: exp.Properties.Location.POST_SCHEMA, + 217 exp.SchemaCommentProperty: exp.Properties.Location.POST_SCHEMA, + 218 exp.SerdeProperties: exp.Properties.Location.POST_SCHEMA, + 219 exp.Set: exp.Properties.Location.POST_SCHEMA, + 220 exp.SettingsProperty: exp.Properties.Location.POST_SCHEMA, + 221 exp.SetProperty: exp.Properties.Location.POST_CREATE, + 222 exp.SortKeyProperty: exp.Properties.Location.POST_SCHEMA, + 223 exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE, + 224 exp.StabilityProperty: exp.Properties.Location.POST_SCHEMA, + 225 exp.TemporaryProperty: exp.Properties.Location.POST_CREATE, + 226 exp.ToTableProperty: exp.Properties.Location.POST_SCHEMA, + 227 exp.TransientProperty: exp.Properties.Location.POST_CREATE, + 228 exp.MergeTreeTTL: exp.Properties.Location.POST_SCHEMA, + 229 exp.VolatileProperty: exp.Properties.Location.POST_CREATE, + 230 exp.WithDataProperty: exp.Properties.Location.POST_EXPRESSION, + 231 exp.WithJournalTableProperty: exp.Properties.Location.POST_NAME, + 232 } + 233 + 234 # Keywords that can't be used as unquoted identifier names + 235 RESERVED_KEYWORDS: t.Set[str] = set() + 236 + 237 # Expressions whose comments are separated from them for better formatting + 238 WITH_SEPARATED_COMMENTS: t.Tuple[t.Type[exp.Expression], ...] 
= ( + 239 exp.Select, + 240 exp.From, + 241 exp.Where, + 242 exp.With, + 243 ) 244 - 245 SENTINEL_LINE_BREAK = "__SQLGLOT__LB__" - 246 - 247 __slots__ = ( - 248 "time_mapping", - 249 "time_trie", - 250 "pretty", - 251 "quote_start", - 252 "quote_end", - 253 "identifier_start", - 254 "identifier_end", - 255 "bit_start", - 256 "bit_end", - 257 "hex_start", - 258 "hex_end", - 259 "byte_start", - 260 "byte_end", - 261 "raw_start", - 262 "raw_end", - 263 "identify", - 264 "normalize", - 265 "string_escape", - 266 "identifier_escape", - 267 "pad", - 268 "index_offset", - 269 "unnest_column_only", - 270 "alias_post_tablesample", - 271 "identifiers_can_start_with_digit", - 272 "normalize_functions", - 273 "unsupported_level", - 274 "unsupported_messages", - 275 "null_ordering", - 276 "max_unsupported", - 277 "_indent", - 278 "_escaped_quote_end", - 279 "_escaped_identifier_end", - 280 "_leading_comma", - 281 "_max_text_width", - 282 "_comments", - 283 "_cache", - 284 ) - 285 - 286 def __init__( - 287 self, - 288 time_mapping=None, - 289 time_trie=None, - 290 pretty=None, - 291 quote_start=None, - 292 quote_end=None, - 293 identifier_start=None, - 294 identifier_end=None, - 295 bit_start=None, - 296 bit_end=None, - 297 hex_start=None, - 298 hex_end=None, - 299 byte_start=None, - 300 byte_end=None, - 301 raw_start=None, - 302 raw_end=None, - 303 identify=False, - 304 normalize=False, - 305 string_escape=None, - 306 identifier_escape=None, - 307 pad=2, - 308 indent=2, - 309 index_offset=0, - 310 unnest_column_only=False, - 311 alias_post_tablesample=False, - 312 identifiers_can_start_with_digit=False, - 313 normalize_functions="upper", - 314 unsupported_level=ErrorLevel.WARN, - 315 null_ordering=None, - 316 max_unsupported=3, - 317 leading_comma=False, - 318 max_text_width=80, - 319 comments=True, - 320 ): - 321 import sqlglot - 322 - 323 self.time_mapping = time_mapping or {} - 324 self.time_trie = time_trie - 325 self.pretty = pretty if pretty is not None else sqlglot.pretty - 326 self.quote_start = quote_start or "'" - 327 self.quote_end = quote_end or "'" - 328 self.identifier_start = identifier_start or '"' - 329 self.identifier_end = identifier_end or '"' - 330 self.bit_start = bit_start - 331 self.bit_end = bit_end - 332 self.hex_start = hex_start - 333 self.hex_end = hex_end - 334 self.byte_start = byte_start - 335 self.byte_end = byte_end - 336 self.raw_start = raw_start - 337 self.raw_end = raw_end - 338 self.identify = identify - 339 self.normalize = normalize - 340 self.string_escape = string_escape or "'" - 341 self.identifier_escape = identifier_escape or '"' - 342 self.pad = pad - 343 self.index_offset = index_offset - 344 self.unnest_column_only = unnest_column_only - 345 self.alias_post_tablesample = alias_post_tablesample - 346 self.identifiers_can_start_with_digit = identifiers_can_start_with_digit - 347 self.normalize_functions = normalize_functions - 348 self.unsupported_level = unsupported_level - 349 self.unsupported_messages = [] - 350 self.max_unsupported = max_unsupported - 351 self.null_ordering = null_ordering - 352 self._indent = indent - 353 self._escaped_quote_end = self.string_escape + self.quote_end - 354 self._escaped_identifier_end = self.identifier_escape + self.identifier_end - 355 self._leading_comma = leading_comma - 356 self._max_text_width = max_text_width - 357 self._comments = comments - 358 self._cache = None - 359 - 360 def generate( - 361 self, - 362 expression: t.Optional[exp.Expression], - 363 cache: t.Optional[t.Dict[int, str]] = None, - 364 ) -> str: 
- 365 """ - 366 Generates a SQL string by interpreting the given syntax tree. - 367 - 368 Args - 369 expression: the syntax tree. - 370 cache: an optional sql string cache. this leverages the hash of an expression which is slow, so only use this if you set _hash on each node. - 371 - 372 Returns - 373 the SQL string. - 374 """ - 375 if cache is not None: - 376 self._cache = cache - 377 self.unsupported_messages = [] - 378 sql = self.sql(expression).strip() - 379 self._cache = None - 380 - 381 if self.unsupported_level == ErrorLevel.IGNORE: - 382 return sql - 383 - 384 if self.unsupported_level == ErrorLevel.WARN: - 385 for msg in self.unsupported_messages: - 386 logger.warning(msg) - 387 elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages: - 388 raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported)) - 389 - 390 if self.pretty: - 391 sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n") - 392 return sql - 393 - 394 def unsupported(self, message: str) -> None: - 395 if self.unsupported_level == ErrorLevel.IMMEDIATE: - 396 raise UnsupportedError(message) - 397 self.unsupported_messages.append(message) - 398 - 399 def sep(self, sep: str = " ") -> str: - 400 return f"{sep.strip()}\n" if self.pretty else sep - 401 - 402 def seg(self, sql: str, sep: str = " ") -> str: - 403 return f"{self.sep(sep)}{sql}" - 404 - 405 def pad_comment(self, comment: str) -> str: - 406 comment = " " + comment if comment[0].strip() else comment - 407 comment = comment + " " if comment[-1].strip() else comment - 408 return comment - 409 - 410 def maybe_comment( - 411 self, - 412 sql: str, - 413 expression: t.Optional[exp.Expression] = None, - 414 comments: t.Optional[t.List[str]] = None, - 415 ) -> str: - 416 comments = ((expression and expression.comments) if comments is None else comments) if self._comments else None # type: ignore - 417 - 418 if not comments or isinstance(expression, exp.Binary): - 419 return sql + 245 # Expressions that can remain unwrapped when appearing in the context of an INTERVAL + 246 UNWRAPPED_INTERVAL_VALUES: t.Tuple[t.Type[exp.Expression], ...] 
= ( + 247 exp.Column, + 248 exp.Literal, + 249 exp.Neg, + 250 exp.Paren, + 251 ) + 252 + 253 SENTINEL_LINE_BREAK = "__SQLGLOT__LB__" + 254 + 255 # Autofilled + 256 INVERSE_TIME_MAPPING: t.Dict[str, str] = {} + 257 INVERSE_TIME_TRIE: t.Dict = {} + 258 INDEX_OFFSET = 0 + 259 UNNEST_COLUMN_ONLY = False + 260 ALIAS_POST_TABLESAMPLE = False + 261 IDENTIFIERS_CAN_START_WITH_DIGIT = False + 262 STRICT_STRING_CONCAT = False + 263 NORMALIZE_FUNCTIONS: bool | str = "upper" + 264 NULL_ORDERING = "nulls_are_small" + 265 + 266 # Delimiters for quotes, identifiers and the corresponding escape characters + 267 QUOTE_START = "'" + 268 QUOTE_END = "'" + 269 IDENTIFIER_START = '"' + 270 IDENTIFIER_END = '"' + 271 STRING_ESCAPE = "'" + 272 IDENTIFIER_ESCAPE = '"' + 273 + 274 # Delimiters for bit, hex, byte and raw literals + 275 BIT_START: t.Optional[str] = None + 276 BIT_END: t.Optional[str] = None + 277 HEX_START: t.Optional[str] = None + 278 HEX_END: t.Optional[str] = None + 279 BYTE_START: t.Optional[str] = None + 280 BYTE_END: t.Optional[str] = None + 281 RAW_START: t.Optional[str] = None + 282 RAW_END: t.Optional[str] = None + 283 + 284 __slots__ = ( + 285 "pretty", + 286 "identify", + 287 "normalize", + 288 "pad", + 289 "_indent", + 290 "normalize_functions", + 291 "unsupported_level", + 292 "max_unsupported", + 293 "leading_comma", + 294 "max_text_width", + 295 "comments", + 296 "unsupported_messages", + 297 "_escaped_quote_end", + 298 "_escaped_identifier_end", + 299 "_cache", + 300 ) + 301 + 302 def __init__( + 303 self, + 304 pretty: t.Optional[bool] = None, + 305 identify: str | bool = False, + 306 normalize: bool = False, + 307 pad: int = 2, + 308 indent: int = 2, + 309 normalize_functions: t.Optional[str | bool] = None, + 310 unsupported_level: ErrorLevel = ErrorLevel.WARN, + 311 max_unsupported: int = 3, + 312 leading_comma: bool = False, + 313 max_text_width: int = 80, + 314 comments: bool = True, + 315 ): + 316 import sqlglot + 317 + 318 self.pretty = pretty if pretty is not None else sqlglot.pretty + 319 self.identify = identify + 320 self.normalize = normalize + 321 self.pad = pad + 322 self._indent = indent + 323 self.unsupported_level = unsupported_level + 324 self.max_unsupported = max_unsupported + 325 self.leading_comma = leading_comma + 326 self.max_text_width = max_text_width + 327 self.comments = comments + 328 + 329 # This is both a Dialect property and a Generator argument, so we prioritize the latter + 330 self.normalize_functions = ( + 331 self.NORMALIZE_FUNCTIONS if normalize_functions is None else normalize_functions + 332 ) + 333 + 334 self.unsupported_messages: t.List[str] = [] + 335 self._escaped_quote_end: str = self.STRING_ESCAPE + self.QUOTE_END + 336 self._escaped_identifier_end: str = self.IDENTIFIER_ESCAPE + self.IDENTIFIER_END + 337 self._cache: t.Optional[t.Dict[int, str]] = None + 338 + 339 def generate( + 340 self, + 341 expression: t.Optional[exp.Expression], + 342 cache: t.Optional[t.Dict[int, str]] = None, + 343 ) -> str: + 344 """ + 345 Generates the SQL string corresponding to the given syntax tree. + 346 + 347 Args: + 348 expression: The syntax tree. + 349 cache: An optional sql string cache. This leverages the hash of an Expression + 350 which can be slow to compute, so only use it if you set _hash on each node. + 351 + 352 Returns: + 353 The SQL string corresponding to `expression`. 
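(To illustrate the entry point documented above: generation is usually reached through Expression.sql() or sqlglot.transpile(), which construct the dialect's Generator, but Generator.generate can also be driven directly. A minimal usage sketch; the query text is illustrative.)

import sqlglot
from sqlglot.generator import Generator

tree = sqlglot.parse_one("SELECT a FROM t WHERE b > 1")

# The usual route: the dialect wires its Generator up behind Expression.sql().
print(tree.sql(dialect="postgres", pretty=True))

# Driving the base Generator directly, per the signature documented above.
print(Generator(pretty=True).generate(tree))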
+ 354 """ + 355 if cache is not None: + 356 self._cache = cache + 357 + 358 self.unsupported_messages = [] + 359 sql = self.sql(expression).strip() + 360 self._cache = None + 361 + 362 if self.unsupported_level == ErrorLevel.IGNORE: + 363 return sql + 364 + 365 if self.unsupported_level == ErrorLevel.WARN: + 366 for msg in self.unsupported_messages: + 367 logger.warning(msg) + 368 elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages: + 369 raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported)) + 370 + 371 if self.pretty: + 372 sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n") + 373 return sql + 374 + 375 def unsupported(self, message: str) -> None: + 376 if self.unsupported_level == ErrorLevel.IMMEDIATE: + 377 raise UnsupportedError(message) + 378 self.unsupported_messages.append(message) + 379 + 380 def sep(self, sep: str = " ") -> str: + 381 return f"{sep.strip()}\n" if self.pretty else sep + 382 + 383 def seg(self, sql: str, sep: str = " ") -> str: + 384 return f"{self.sep(sep)}{sql}" + 385 + 386 def pad_comment(self, comment: str) -> str: + 387 comment = " " + comment if comment[0].strip() else comment + 388 comment = comment + " " if comment[-1].strip() else comment + 389 return comment + 390 + 391 def maybe_comment( + 392 self, + 393 sql: str, + 394 expression: t.Optional[exp.Expression] = None, + 395 comments: t.Optional[t.List[str]] = None, + 396 ) -> str: + 397 comments = ( + 398 ((expression and expression.comments) if comments is None else comments) # type: ignore + 399 if self.comments + 400 else None + 401 ) + 402 + 403 if not comments or isinstance(expression, exp.Binary): + 404 return sql + 405 + 406 sep = "\n" if self.pretty else " " + 407 comments_sql = sep.join( + 408 f"/*{self.pad_comment(comment)}*/" for comment in comments if comment + 409 ) + 410 + 411 if not comments_sql: + 412 return sql + 413 + 414 if isinstance(expression, self.WITH_SEPARATED_COMMENTS): + 415 return ( + 416 f"{self.sep()}{comments_sql}{sql}" + 417 if sql[0].isspace() + 418 else f"{comments_sql}{self.sep()}{sql}" + 419 ) 420 - 421 sep = "\n" if self.pretty else " " - 422 comments_sql = sep.join( - 423 f"/*{self.pad_comment(comment)}*/" for comment in comments if comment - 424 ) - 425 - 426 if not comments_sql: - 427 return sql - 428 - 429 if isinstance(expression, self.WITH_SEPARATED_COMMENTS): - 430 return ( - 431 f"{self.sep()}{comments_sql}{sql}" - 432 if sql[0].isspace() - 433 else f"{comments_sql}{self.sep()}{sql}" - 434 ) - 435 - 436 return f"{sql} {comments_sql}" - 437 - 438 def wrap(self, expression: exp.Expression | str) -> str: - 439 this_sql = self.indent( - 440 self.sql(expression) - 441 if isinstance(expression, (exp.Select, exp.Union)) - 442 else self.sql(expression, "this"), - 443 level=1, - 444 pad=0, - 445 ) - 446 return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}" - 447 - 448 def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str: - 449 original = self.identify - 450 self.identify = False - 451 result = func(*args, **kwargs) - 452 self.identify = original - 453 return result - 454 - 455 def normalize_func(self, name: str) -> str: - 456 if self.normalize_functions == "upper": - 457 return name.upper() - 458 if self.normalize_functions == "lower": - 459 return name.lower() - 460 return name - 461 - 462 def indent( - 463 self, - 464 sql: str, - 465 level: int = 0, - 466 pad: t.Optional[int] = None, - 467 skip_first: bool = False, - 468 skip_last: bool = False, - 469 ) -> str: - 470 if not 
self.pretty: - 471 return sql - 472 - 473 pad = self.pad if pad is None else pad - 474 lines = sql.split("\n") - 475 - 476 return "\n".join( - 477 line - 478 if (skip_first and i == 0) or (skip_last and i == len(lines) - 1) - 479 else f"{' ' * (level * self._indent + pad)}{line}" - 480 for i, line in enumerate(lines) - 481 ) + 421 return f"{sql} {comments_sql}" + 422 + 423 def wrap(self, expression: exp.Expression | str) -> str: + 424 this_sql = self.indent( + 425 self.sql(expression) + 426 if isinstance(expression, (exp.Select, exp.Union)) + 427 else self.sql(expression, "this"), + 428 level=1, + 429 pad=0, + 430 ) + 431 return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}" + 432 + 433 def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str: + 434 original = self.identify + 435 self.identify = False + 436 result = func(*args, **kwargs) + 437 self.identify = original + 438 return result + 439 + 440 def normalize_func(self, name: str) -> str: + 441 if self.normalize_functions == "upper" or self.normalize_functions is True: + 442 return name.upper() + 443 if self.normalize_functions == "lower": + 444 return name.lower() + 445 return name + 446 + 447 def indent( + 448 self, + 449 sql: str, + 450 level: int = 0, + 451 pad: t.Optional[int] = None, + 452 skip_first: bool = False, + 453 skip_last: bool = False, + 454 ) -> str: + 455 if not self.pretty: + 456 return sql + 457 + 458 pad = self.pad if pad is None else pad + 459 lines = sql.split("\n") + 460 + 461 return "\n".join( + 462 line + 463 if (skip_first and i == 0) or (skip_last and i == len(lines) - 1) + 464 else f"{' ' * (level * self._indent + pad)}{line}" + 465 for i, line in enumerate(lines) + 466 ) + 467 + 468 def sql( + 469 self, + 470 expression: t.Optional[str | exp.Expression], + 471 key: t.Optional[str] = None, + 472 comment: bool = True, + 473 ) -> str: + 474 if not expression: + 475 return "" + 476 + 477 if isinstance(expression, str): + 478 return expression + 479 + 480 if key: + 481 return self.sql(expression.args.get(key)) 482 - 483 def sql( - 484 self, - 485 expression: t.Optional[str | exp.Expression], - 486 key: t.Optional[str] = None, - 487 comment: bool = True, - 488 ) -> str: - 489 if not expression: - 490 return "" - 491 - 492 if isinstance(expression, str): - 493 return expression - 494 - 495 if key: - 496 return self.sql(expression.args.get(key)) + 483 if self._cache is not None: + 484 expression_id = hash(expression) + 485 + 486 if expression_id in self._cache: + 487 return self._cache[expression_id] + 488 + 489 transform = self.TRANSFORMS.get(expression.__class__) + 490 + 491 if callable(transform): + 492 sql = transform(self, expression) + 493 elif transform: + 494 sql = transform + 495 elif isinstance(expression, exp.Expression): + 496 exp_handler_name = f"{expression.key}_sql" 497 - 498 if self._cache is not None: - 499 expression_id = hash(expression) - 500 - 501 if expression_id in self._cache: - 502 return self._cache[expression_id] - 503 - 504 transform = self.TRANSFORMS.get(expression.__class__) - 505 - 506 if callable(transform): - 507 sql = transform(self, expression) - 508 elif transform: - 509 sql = transform - 510 elif isinstance(expression, exp.Expression): - 511 exp_handler_name = f"{expression.key}_sql" - 512 - 513 if hasattr(self, exp_handler_name): - 514 sql = getattr(self, exp_handler_name)(expression) - 515 elif isinstance(expression, exp.Func): - 516 sql = self.function_fallback_sql(expression) - 517 elif isinstance(expression, exp.Property): - 518 sql = 
self.property_sql(expression) - 519 else: - 520 raise ValueError(f"Unsupported expression type {expression.__class__.__name__}") - 521 else: - 522 raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}") - 523 - 524 sql = self.maybe_comment(sql, expression) if self._comments and comment else sql - 525 - 526 if self._cache is not None: - 527 self._cache[expression_id] = sql - 528 return sql + 498 if hasattr(self, exp_handler_name): + 499 sql = getattr(self, exp_handler_name)(expression) + 500 elif isinstance(expression, exp.Func): + 501 sql = self.function_fallback_sql(expression) + 502 elif isinstance(expression, exp.Property): + 503 sql = self.property_sql(expression) + 504 else: + 505 raise ValueError(f"Unsupported expression type {expression.__class__.__name__}") + 506 else: + 507 raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}") + 508 + 509 sql = self.maybe_comment(sql, expression) if self.comments and comment else sql + 510 + 511 if self._cache is not None: + 512 self._cache[expression_id] = sql + 513 return sql + 514 + 515 def uncache_sql(self, expression: exp.Uncache) -> str: + 516 table = self.sql(expression, "this") + 517 exists_sql = " IF EXISTS" if expression.args.get("exists") else "" + 518 return f"UNCACHE TABLE{exists_sql} {table}" + 519 + 520 def cache_sql(self, expression: exp.Cache) -> str: + 521 lazy = " LAZY" if expression.args.get("lazy") else "" + 522 table = self.sql(expression, "this") + 523 options = expression.args.get("options") + 524 options = f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})" if options else "" + 525 sql = self.sql(expression, "expression") + 526 sql = f" AS{self.sep()}{sql}" if sql else "" + 527 sql = f"CACHE{lazy} TABLE {table}{options}{sql}" + 528 return self.prepend_ctes(expression, sql) 529 - 530 def uncache_sql(self, expression: exp.Uncache) -> str: - 531 table = self.sql(expression, "this") - 532 exists_sql = " IF EXISTS" if expression.args.get("exists") else "" - 533 return f"UNCACHE TABLE{exists_sql} {table}" - 534 - 535 def cache_sql(self, expression: exp.Cache) -> str: - 536 lazy = " LAZY" if expression.args.get("lazy") else "" - 537 table = self.sql(expression, "this") - 538 options = expression.args.get("options") - 539 options = f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})" if options else "" - 540 sql = self.sql(expression, "expression") - 541 sql = f" AS{self.sep()}{sql}" if sql else "" - 542 sql = f"CACHE{lazy} TABLE {table}{options}{sql}" - 543 return self.prepend_ctes(expression, sql) - 544 - 545 def characterset_sql(self, expression: exp.CharacterSet) -> str: - 546 if isinstance(expression.parent, exp.Cast): - 547 return f"CHAR CHARACTER SET {self.sql(expression, 'this')}" - 548 default = "DEFAULT " if expression.args.get("default") else "" - 549 return f"{default}CHARACTER SET={self.sql(expression, 'this')}" - 550 - 551 def column_sql(self, expression: exp.Column) -> str: - 552 return ".".join( - 553 self.sql(part) - 554 for part in ( - 555 expression.args.get("catalog"), - 556 expression.args.get("db"), - 557 expression.args.get("table"), - 558 expression.args.get("this"), - 559 ) - 560 if part - 561 ) - 562 - 563 def columnposition_sql(self, expression: exp.ColumnPosition) -> str: - 564 this = self.sql(expression, "this") - 565 this = f" {this}" if this else "" - 566 position = self.sql(expression, "position") - 567 return f"{position}{this}" - 568 - 569 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: - 
570 column = self.sql(expression, "this") - 571 kind = self.sql(expression, "kind") - 572 constraints = self.expressions(expression, key="constraints", sep=" ", flat=True) - 573 exists = "IF NOT EXISTS " if expression.args.get("exists") else "" - 574 kind = f"{sep}{kind}" if kind else "" - 575 constraints = f" {constraints}" if constraints else "" - 576 position = self.sql(expression, "position") - 577 position = f" {position}" if position else "" - 578 - 579 return f"{exists}{column}{kind}{constraints}{position}" - 580 - 581 def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str: - 582 this = self.sql(expression, "this") - 583 kind_sql = self.sql(expression, "kind").strip() - 584 return f"CONSTRAINT {this} {kind_sql}" if this else kind_sql - 585 - 586 def autoincrementcolumnconstraint_sql(self, _) -> str: - 587 return self.token_sql(TokenType.AUTO_INCREMENT) - 588 - 589 def compresscolumnconstraint_sql(self, expression: exp.CompressColumnConstraint) -> str: - 590 if isinstance(expression.this, list): - 591 this = self.wrap(self.expressions(expression, key="this", flat=True)) - 592 else: - 593 this = self.sql(expression, "this") - 594 - 595 return f"COMPRESS {this}" - 596 - 597 def generatedasidentitycolumnconstraint_sql( - 598 self, expression: exp.GeneratedAsIdentityColumnConstraint - 599 ) -> str: - 600 this = "" - 601 if expression.this is not None: - 602 on_null = "ON NULL " if expression.args.get("on_null") else "" - 603 this = " ALWAYS " if expression.this else f" BY DEFAULT {on_null}" + 530 def characterset_sql(self, expression: exp.CharacterSet) -> str: + 531 if isinstance(expression.parent, exp.Cast): + 532 return f"CHAR CHARACTER SET {self.sql(expression, 'this')}" + 533 default = "DEFAULT " if expression.args.get("default") else "" + 534 return f"{default}CHARACTER SET={self.sql(expression, 'this')}" + 535 + 536 def column_sql(self, expression: exp.Column) -> str: + 537 return ".".join( + 538 self.sql(part) + 539 for part in ( + 540 expression.args.get("catalog"), + 541 expression.args.get("db"), + 542 expression.args.get("table"), + 543 expression.args.get("this"), + 544 ) + 545 if part + 546 ) + 547 + 548 def columnposition_sql(self, expression: exp.ColumnPosition) -> str: + 549 this = self.sql(expression, "this") + 550 this = f" {this}" if this else "" + 551 position = self.sql(expression, "position") + 552 return f"{position}{this}" + 553 + 554 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: + 555 column = self.sql(expression, "this") + 556 kind = self.sql(expression, "kind") + 557 constraints = self.expressions(expression, key="constraints", sep=" ", flat=True) + 558 exists = "IF NOT EXISTS " if expression.args.get("exists") else "" + 559 kind = f"{sep}{kind}" if kind else "" + 560 constraints = f" {constraints}" if constraints else "" + 561 position = self.sql(expression, "position") + 562 position = f" {position}" if position else "" + 563 + 564 return f"{exists}{column}{kind}{constraints}{position}" + 565 + 566 def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str: + 567 this = self.sql(expression, "this") + 568 kind_sql = self.sql(expression, "kind").strip() + 569 return f"CONSTRAINT {this} {kind_sql}" if this else kind_sql + 570 + 571 def autoincrementcolumnconstraint_sql(self, _) -> str: + 572 return self.token_sql(TokenType.AUTO_INCREMENT) + 573 + 574 def compresscolumnconstraint_sql(self, expression: exp.CompressColumnConstraint) -> str: + 575 if isinstance(expression.this, list): + 576 this = 
self.wrap(self.expressions(expression, key="this", flat=True)) + 577 else: + 578 this = self.sql(expression, "this") + 579 + 580 return f"COMPRESS {this}" + 581 + 582 def generatedasidentitycolumnconstraint_sql( + 583 self, expression: exp.GeneratedAsIdentityColumnConstraint + 584 ) -> str: + 585 this = "" + 586 if expression.this is not None: + 587 on_null = "ON NULL " if expression.args.get("on_null") else "" + 588 this = " ALWAYS " if expression.this else f" BY DEFAULT {on_null}" + 589 + 590 start = expression.args.get("start") + 591 start = f"START WITH {start}" if start else "" + 592 increment = expression.args.get("increment") + 593 increment = f" INCREMENT BY {increment}" if increment else "" + 594 minvalue = expression.args.get("minvalue") + 595 minvalue = f" MINVALUE {minvalue}" if minvalue else "" + 596 maxvalue = expression.args.get("maxvalue") + 597 maxvalue = f" MAXVALUE {maxvalue}" if maxvalue else "" + 598 cycle = expression.args.get("cycle") + 599 cycle_sql = "" + 600 + 601 if cycle is not None: + 602 cycle_sql = f"{' NO' if not cycle else ''} CYCLE" + 603 cycle_sql = cycle_sql.strip() if not start and not increment else cycle_sql 604 - 605 start = expression.args.get("start") - 606 start = f"START WITH {start}" if start else "" - 607 increment = expression.args.get("increment") - 608 increment = f" INCREMENT BY {increment}" if increment else "" - 609 minvalue = expression.args.get("minvalue") - 610 minvalue = f" MINVALUE {minvalue}" if minvalue else "" - 611 maxvalue = expression.args.get("maxvalue") - 612 maxvalue = f" MAXVALUE {maxvalue}" if maxvalue else "" - 613 cycle = expression.args.get("cycle") - 614 cycle_sql = "" - 615 - 616 if cycle is not None: - 617 cycle_sql = f"{' NO' if not cycle else ''} CYCLE" - 618 cycle_sql = cycle_sql.strip() if not start and not increment else cycle_sql - 619 - 620 sequence_opts = "" - 621 if start or increment or cycle_sql: - 622 sequence_opts = f"{start}{increment}{minvalue}{maxvalue}{cycle_sql}" - 623 sequence_opts = f" ({sequence_opts.strip()})" - 624 - 625 expr = self.sql(expression, "expression") - 626 expr = f"({expr})" if expr else "IDENTITY" - 627 - 628 return f"GENERATED{this}AS {expr}{sequence_opts}" - 629 - 630 def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str: - 631 return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL" - 632 - 633 def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str: - 634 desc = expression.args.get("desc") - 635 if desc is not None: - 636 return f"PRIMARY KEY{' DESC' if desc else ' ASC'}" - 637 return f"PRIMARY KEY" + 605 sequence_opts = "" + 606 if start or increment or cycle_sql: + 607 sequence_opts = f"{start}{increment}{minvalue}{maxvalue}{cycle_sql}" + 608 sequence_opts = f" ({sequence_opts.strip()})" + 609 + 610 expr = self.sql(expression, "expression") + 611 expr = f"({expr})" if expr else "IDENTITY" + 612 + 613 return f"GENERATED{this}AS {expr}{sequence_opts}" + 614 + 615 def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str: + 616 return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL" + 617 + 618 def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str: + 619 desc = expression.args.get("desc") + 620 if desc is not None: + 621 return f"PRIMARY KEY{' DESC' if desc else ' ASC'}" + 622 return f"PRIMARY KEY" + 623 + 624 def uniquecolumnconstraint_sql(self, expression: exp.UniqueColumnConstraint) -> str: + 625 this = 
self.sql(expression, "this") + 626 this = f" {this}" if this else "" + 627 return f"UNIQUE{this}" + 628 + 629 def createable_sql( + 630 self, expression: exp.Create, locations: dict[exp.Properties.Location, list[exp.Property]] + 631 ) -> str: + 632 return self.sql(expression, "this") + 633 + 634 def create_sql(self, expression: exp.Create) -> str: + 635 kind = self.sql(expression, "kind").upper() + 636 properties = expression.args.get("properties") + 637 properties_locs = self.locate_properties(properties) if properties else {} 638 - 639 def uniquecolumnconstraint_sql(self, expression: exp.UniqueColumnConstraint) -> str: - 640 this = self.sql(expression, "this") - 641 this = f" {this}" if this else "" - 642 return f"UNIQUE{this}" - 643 - 644 def create_sql(self, expression: exp.Create) -> str: - 645 kind = self.sql(expression, "kind").upper() - 646 properties = expression.args.get("properties") - 647 properties_exp = expression.copy() - 648 properties_locs = self.locate_properties(properties) if properties else {} - 649 if properties_locs.get(exp.Properties.Location.POST_SCHEMA) or properties_locs.get( - 650 exp.Properties.Location.POST_WITH - 651 ): - 652 properties_exp.set( - 653 "properties", - 654 exp.Properties( - 655 expressions=[ - 656 *properties_locs[exp.Properties.Location.POST_SCHEMA], - 657 *properties_locs[exp.Properties.Location.POST_WITH], - 658 ] - 659 ), - 660 ) - 661 if kind == "TABLE" and properties_locs.get(exp.Properties.Location.POST_NAME): - 662 this_name = self.sql(expression.this, "this") - 663 this_properties = self.properties( - 664 exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_NAME]), - 665 wrapped=False, - 666 ) - 667 this_schema = f"({self.expressions(expression.this)})" - 668 this = f"{this_name}, {this_properties} {this_schema}" - 669 properties_sql = "" - 670 else: - 671 this = self.sql(expression, "this") - 672 properties_sql = self.sql(properties_exp, "properties") - 673 begin = " BEGIN" if expression.args.get("begin") else "" - 674 expression_sql = self.sql(expression, "expression") - 675 if expression_sql: - 676 expression_sql = f"{begin}{self.sep()}{expression_sql}" - 677 - 678 if self.CREATE_FUNCTION_RETURN_AS or not isinstance(expression.expression, exp.Return): - 679 if properties_locs.get(exp.Properties.Location.POST_ALIAS): - 680 postalias_props_sql = self.properties( - 681 exp.Properties( - 682 expressions=properties_locs[exp.Properties.Location.POST_ALIAS] - 683 ), - 684 wrapped=False, - 685 ) - 686 expression_sql = f" AS {postalias_props_sql}{expression_sql}" - 687 else: - 688 expression_sql = f" AS{expression_sql}" - 689 - 690 postindex_props_sql = "" - 691 if properties_locs.get(exp.Properties.Location.POST_INDEX): - 692 postindex_props_sql = self.properties( - 693 exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_INDEX]), - 694 wrapped=False, - 695 prefix=" ", - 696 ) - 697 - 698 indexes = self.expressions(expression, key="indexes", indent=False, sep=" ") - 699 indexes = f" {indexes}" if indexes else "" - 700 index_sql = indexes + postindex_props_sql - 701 - 702 replace = " OR REPLACE" if expression.args.get("replace") else "" - 703 unique = " UNIQUE" if expression.args.get("unique") else "" - 704 - 705 postcreate_props_sql = "" - 706 if properties_locs.get(exp.Properties.Location.POST_CREATE): - 707 postcreate_props_sql = self.properties( - 708 exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]), - 709 sep=" ", - 710 prefix=" ", - 711 wrapped=False, - 712 ) - 713 - 
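(As a usage illustration of the CREATE pipeline in this hunk: locate_properties buckets each parsed property by PROPERTIES_LOCATION, and create_sql re-emits every bucket around the kind/this/expression pieces. A minimal round-trip sketch; the statement below is illustrative only.)

import sqlglot

# TEMPORARY is a POST_CREATE property, so it is re-emitted right after CREATE,
# while the column schema stays attached to the table name.
sql = "CREATE TEMPORARY TABLE IF NOT EXISTS t (a INT)"
print(sqlglot.transpile(sql)[0])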
714 modifiers = "".join((replace, unique, postcreate_props_sql)) + 639 this = self.createable_sql(expression, properties_locs) + 640 + 641 properties_sql = "" + 642 if properties_locs.get(exp.Properties.Location.POST_SCHEMA) or properties_locs.get( + 643 exp.Properties.Location.POST_WITH + 644 ): + 645 properties_sql = self.sql( + 646 exp.Properties( + 647 expressions=[ + 648 *properties_locs[exp.Properties.Location.POST_SCHEMA], + 649 *properties_locs[exp.Properties.Location.POST_WITH], + 650 ] + 651 ) + 652 ) + 653 + 654 begin = " BEGIN" if expression.args.get("begin") else "" + 655 expression_sql = self.sql(expression, "expression") + 656 if expression_sql: + 657 expression_sql = f"{begin}{self.sep()}{expression_sql}" + 658 + 659 if self.CREATE_FUNCTION_RETURN_AS or not isinstance(expression.expression, exp.Return): + 660 if properties_locs.get(exp.Properties.Location.POST_ALIAS): + 661 postalias_props_sql = self.properties( + 662 exp.Properties( + 663 expressions=properties_locs[exp.Properties.Location.POST_ALIAS] + 664 ), + 665 wrapped=False, + 666 ) + 667 expression_sql = f" AS {postalias_props_sql}{expression_sql}" + 668 else: + 669 expression_sql = f" AS{expression_sql}" + 670 + 671 postindex_props_sql = "" + 672 if properties_locs.get(exp.Properties.Location.POST_INDEX): + 673 postindex_props_sql = self.properties( + 674 exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_INDEX]), + 675 wrapped=False, + 676 prefix=" ", + 677 ) + 678 + 679 indexes = self.expressions(expression, key="indexes", indent=False, sep=" ") + 680 indexes = f" {indexes}" if indexes else "" + 681 index_sql = indexes + postindex_props_sql + 682 + 683 replace = " OR REPLACE" if expression.args.get("replace") else "" + 684 unique = " UNIQUE" if expression.args.get("unique") else "" + 685 + 686 postcreate_props_sql = "" + 687 if properties_locs.get(exp.Properties.Location.POST_CREATE): + 688 postcreate_props_sql = self.properties( + 689 exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]), + 690 sep=" ", + 691 prefix=" ", + 692 wrapped=False, + 693 ) + 694 + 695 modifiers = "".join((replace, unique, postcreate_props_sql)) + 696 + 697 postexpression_props_sql = "" + 698 if properties_locs.get(exp.Properties.Location.POST_EXPRESSION): + 699 postexpression_props_sql = self.properties( + 700 exp.Properties( + 701 expressions=properties_locs[exp.Properties.Location.POST_EXPRESSION] + 702 ), + 703 sep=" ", + 704 prefix=" ", + 705 wrapped=False, + 706 ) + 707 + 708 exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else "" + 709 no_schema_binding = ( + 710 " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else "" + 711 ) + 712 + 713 clone = self.sql(expression, "clone") + 714 clone = f" {clone}" if clone else "" 715 - 716 postexpression_props_sql = "" - 717 if properties_locs.get(exp.Properties.Location.POST_EXPRESSION): - 718 postexpression_props_sql = self.properties( - 719 exp.Properties( - 720 expressions=properties_locs[exp.Properties.Location.POST_EXPRESSION] - 721 ), - 722 sep=" ", - 723 prefix=" ", - 724 wrapped=False, - 725 ) - 726 - 727 exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else "" - 728 no_schema_binding = ( - 729 " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else "" - 730 ) - 731 - 732 clone = self.sql(expression, "clone") - 733 clone = f" {clone}" if clone else "" - 734 - 735 expression_sql = f"CREATE{modifiers} {kind}{exists_sql} 
{this}{properties_sql}{expression_sql}{postexpression_props_sql}{index_sql}{no_schema_binding}{clone}" - 736 return self.prepend_ctes(expression, expression_sql) - 737 - 738 def clone_sql(self, expression: exp.Clone) -> str: - 739 this = self.sql(expression, "this") - 740 when = self.sql(expression, "when") - 741 - 742 if when: - 743 kind = self.sql(expression, "kind") - 744 expr = self.sql(expression, "expression") - 745 return f"CLONE {this} {when} ({kind} => {expr})" - 746 - 747 return f"CLONE {this}" + 716 expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{postexpression_props_sql}{index_sql}{no_schema_binding}{clone}" + 717 return self.prepend_ctes(expression, expression_sql) + 718 + 719 def clone_sql(self, expression: exp.Clone) -> str: + 720 this = self.sql(expression, "this") + 721 when = self.sql(expression, "when") + 722 + 723 if when: + 724 kind = self.sql(expression, "kind") + 725 expr = self.sql(expression, "expression") + 726 return f"CLONE {this} {when} ({kind} => {expr})" + 727 + 728 return f"CLONE {this}" + 729 + 730 def describe_sql(self, expression: exp.Describe) -> str: + 731 return f"DESCRIBE {self.sql(expression, 'this')}" + 732 + 733 def prepend_ctes(self, expression: exp.Expression, sql: str) -> str: + 734 with_ = self.sql(expression, "with") + 735 if with_: + 736 sql = f"{with_}{self.sep()}{sql}" + 737 return sql + 738 + 739 def with_sql(self, expression: exp.With) -> str: + 740 sql = self.expressions(expression, flat=True) + 741 recursive = "RECURSIVE " if expression.args.get("recursive") else "" + 742 + 743 return f"WITH {recursive}{sql}" + 744 + 745 def cte_sql(self, expression: exp.CTE) -> str: + 746 alias = self.sql(expression, "alias") + 747 return f"{alias} AS {self.wrap(expression)}" 748 - 749 def describe_sql(self, expression: exp.Describe) -> str: - 750 return f"DESCRIBE {self.sql(expression, 'this')}" - 751 - 752 def prepend_ctes(self, expression: exp.Expression, sql: str) -> str: - 753 with_ = self.sql(expression, "with") - 754 if with_: - 755 sql = f"{with_}{self.sep()}{sql}" - 756 return sql - 757 - 758 def with_sql(self, expression: exp.With) -> str: - 759 sql = self.expressions(expression, flat=True) - 760 recursive = "RECURSIVE " if expression.args.get("recursive") else "" - 761 - 762 return f"WITH {recursive}{sql}" - 763 - 764 def cte_sql(self, expression: exp.CTE) -> str: - 765 alias = self.sql(expression, "alias") - 766 return f"{alias} AS {self.wrap(expression)}" - 767 - 768 def tablealias_sql(self, expression: exp.TableAlias) -> str: - 769 alias = self.sql(expression, "this") - 770 columns = self.expressions(expression, key="columns", flat=True) - 771 columns = f"({columns})" if columns else "" - 772 return f"{alias}{columns}" - 773 - 774 def bitstring_sql(self, expression: exp.BitString) -> str: - 775 this = self.sql(expression, "this") - 776 if self.bit_start: - 777 return f"{self.bit_start}{this}{self.bit_end}" - 778 return f"{int(this, 2)}" - 779 - 780 def hexstring_sql(self, expression: exp.HexString) -> str: - 781 this = self.sql(expression, "this") - 782 if self.hex_start: - 783 return f"{self.hex_start}{this}{self.hex_end}" - 784 return f"{int(this, 16)}" - 785 - 786 def bytestring_sql(self, expression: exp.ByteString) -> str: - 787 this = self.sql(expression, "this") - 788 if self.byte_start: - 789 return f"{self.byte_start}{this}{self.byte_end}" - 790 return this - 791 - 792 def rawstring_sql(self, expression: exp.RawString) -> str: - 793 if self.raw_start: - 794 return 
f"{self.raw_start}{expression.name}{self.raw_end}" - 795 return self.sql(exp.Literal.string(expression.name.replace("\\", "\\\\"))) - 796 - 797 def datatypesize_sql(self, expression: exp.DataTypeSize) -> str: - 798 this = self.sql(expression, "this") - 799 specifier = self.sql(expression, "expression") - 800 specifier = f" {specifier}" if specifier else "" - 801 return f"{this}{specifier}" - 802 - 803 def datatype_sql(self, expression: exp.DataType) -> str: - 804 type_value = expression.this - 805 type_sql = self.TYPE_MAPPING.get(type_value, type_value.value) - 806 nested = "" - 807 interior = self.expressions(expression, flat=True) - 808 values = "" - 809 if interior: - 810 if expression.args.get("nested"): - 811 nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}" - 812 if expression.args.get("values") is not None: - 813 delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")") - 814 values = self.expressions(expression, key="values", flat=True) - 815 values = f"{delimiters[0]}{values}{delimiters[1]}" - 816 else: - 817 nested = f"({interior})" - 818 - 819 return f"{type_sql}{nested}{values}" + 749 def tablealias_sql(self, expression: exp.TableAlias) -> str: + 750 alias = self.sql(expression, "this") + 751 columns = self.expressions(expression, key="columns", flat=True) + 752 columns = f"({columns})" if columns else "" + 753 return f"{alias}{columns}" + 754 + 755 def bitstring_sql(self, expression: exp.BitString) -> str: + 756 this = self.sql(expression, "this") + 757 if self.BIT_START: + 758 return f"{self.BIT_START}{this}{self.BIT_END}" + 759 return f"{int(this, 2)}" + 760 + 761 def hexstring_sql(self, expression: exp.HexString) -> str: + 762 this = self.sql(expression, "this") + 763 if self.HEX_START: + 764 return f"{self.HEX_START}{this}{self.HEX_END}" + 765 return f"{int(this, 16)}" + 766 + 767 def bytestring_sql(self, expression: exp.ByteString) -> str: + 768 this = self.sql(expression, "this") + 769 if self.BYTE_START: + 770 return f"{self.BYTE_START}{this}{self.BYTE_END}" + 771 return this + 772 + 773 def rawstring_sql(self, expression: exp.RawString) -> str: + 774 if self.RAW_START: + 775 return f"{self.RAW_START}{expression.name}{self.RAW_END}" + 776 return self.sql(exp.Literal.string(expression.name.replace("\\", "\\\\"))) + 777 + 778 def datatypesize_sql(self, expression: exp.DataTypeSize) -> str: + 779 this = self.sql(expression, "this") + 780 specifier = self.sql(expression, "expression") + 781 specifier = f" {specifier}" if specifier else "" + 782 return f"{this}{specifier}" + 783 + 784 def datatype_sql(self, expression: exp.DataType) -> str: + 785 type_value = expression.this + 786 type_sql = self.TYPE_MAPPING.get(type_value, type_value.value) + 787 nested = "" + 788 interior = self.expressions(expression, flat=True) + 789 values = "" + 790 if interior: + 791 if expression.args.get("nested"): + 792 nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}" + 793 if expression.args.get("values") is not None: + 794 delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")") + 795 values = self.expressions(expression, key="values", flat=True) + 796 values = f"{delimiters[0]}{values}{delimiters[1]}" + 797 else: + 798 nested = f"({interior})" + 799 + 800 return f"{type_sql}{nested}{values}" + 801 + 802 def directory_sql(self, expression: exp.Directory) -> str: + 803 local = "LOCAL " if expression.args.get("local") else "" + 804 row_format = self.sql(expression, "row_format") + 805 row_format = f" 
{row_format}" if row_format else "" + 806 return f"{local}DIRECTORY {self.sql(expression, 'this')}{row_format}" + 807 + 808 def delete_sql(self, expression: exp.Delete) -> str: + 809 this = self.sql(expression, "this") + 810 this = f" FROM {this}" if this else "" + 811 using_sql = ( + 812 f" USING {self.expressions(expression, key='using', sep=', USING ')}" + 813 if expression.args.get("using") + 814 else "" + 815 ) + 816 where_sql = self.sql(expression, "where") + 817 returning = self.sql(expression, "returning") + 818 sql = f"DELETE{this}{using_sql}{where_sql}{returning}" + 819 return self.prepend_ctes(expression, sql) 820 - 821 def directory_sql(self, expression: exp.Directory) -> str: - 822 local = "LOCAL " if expression.args.get("local") else "" - 823 row_format = self.sql(expression, "row_format") - 824 row_format = f" {row_format}" if row_format else "" - 825 return f"{local}DIRECTORY {self.sql(expression, 'this')}{row_format}" - 826 - 827 def delete_sql(self, expression: exp.Delete) -> str: - 828 this = self.sql(expression, "this") - 829 this = f" FROM {this}" if this else "" - 830 using_sql = ( - 831 f" USING {self.expressions(expression, key='using', sep=', USING ')}" - 832 if expression.args.get("using") - 833 else "" - 834 ) - 835 where_sql = self.sql(expression, "where") - 836 returning = self.sql(expression, "returning") - 837 sql = f"DELETE{this}{using_sql}{where_sql}{returning}" - 838 return self.prepend_ctes(expression, sql) + 821 def drop_sql(self, expression: exp.Drop) -> str: + 822 this = self.sql(expression, "this") + 823 kind = expression.args["kind"] + 824 exists_sql = " IF EXISTS " if expression.args.get("exists") else " " + 825 temporary = " TEMPORARY" if expression.args.get("temporary") else "" + 826 materialized = " MATERIALIZED" if expression.args.get("materialized") else "" + 827 cascade = " CASCADE" if expression.args.get("cascade") else "" + 828 constraints = " CONSTRAINTS" if expression.args.get("constraints") else "" + 829 purge = " PURGE" if expression.args.get("purge") else "" + 830 return ( + 831 f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{cascade}{constraints}{purge}" + 832 ) + 833 + 834 def except_sql(self, expression: exp.Except) -> str: + 835 return self.prepend_ctes( + 836 expression, + 837 self.set_operation(expression, self.except_op(expression)), + 838 ) 839 - 840 def drop_sql(self, expression: exp.Drop) -> str: - 841 this = self.sql(expression, "this") - 842 kind = expression.args["kind"] - 843 exists_sql = " IF EXISTS " if expression.args.get("exists") else " " - 844 temporary = " TEMPORARY" if expression.args.get("temporary") else "" - 845 materialized = " MATERIALIZED" if expression.args.get("materialized") else "" - 846 cascade = " CASCADE" if expression.args.get("cascade") else "" - 847 constraints = " CONSTRAINTS" if expression.args.get("constraints") else "" - 848 purge = " PURGE" if expression.args.get("purge") else "" - 849 return ( - 850 f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{cascade}{constraints}{purge}" - 851 ) + 840 def except_op(self, expression: exp.Except) -> str: + 841 return f"EXCEPT{'' if expression.args.get('distinct') else ' ALL'}" + 842 + 843 def fetch_sql(self, expression: exp.Fetch) -> str: + 844 direction = expression.args.get("direction") + 845 direction = f" {direction.upper()}" if direction else "" + 846 count = expression.args.get("count") + 847 count = f" {count}" if count else "" + 848 if expression.args.get("percent"): + 849 count = f"{count} PERCENT" + 850 with_ties_or_only = "WITH 
TIES" if expression.args.get("with_ties") else "ONLY" + 851 return f"{self.seg('FETCH')}{direction}{count} ROWS {with_ties_or_only}" 852 - 853 def except_sql(self, expression: exp.Except) -> str: - 854 return self.prepend_ctes( - 855 expression, - 856 self.set_operation(expression, self.except_op(expression)), - 857 ) - 858 - 859 def except_op(self, expression: exp.Except) -> str: - 860 return f"EXCEPT{'' if expression.args.get('distinct') else ' ALL'}" - 861 - 862 def fetch_sql(self, expression: exp.Fetch) -> str: - 863 direction = expression.args.get("direction") - 864 direction = f" {direction.upper()}" if direction else "" - 865 count = expression.args.get("count") - 866 count = f" {count}" if count else "" - 867 if expression.args.get("percent"): - 868 count = f"{count} PERCENT" - 869 with_ties_or_only = "WITH TIES" if expression.args.get("with_ties") else "ONLY" - 870 return f"{self.seg('FETCH')}{direction}{count} ROWS {with_ties_or_only}" - 871 - 872 def filter_sql(self, expression: exp.Filter) -> str: - 873 this = self.sql(expression, "this") - 874 where = self.sql(expression, "expression")[1:] # where has a leading space - 875 return f"{this} FILTER({where})" - 876 - 877 def hint_sql(self, expression: exp.Hint) -> str: - 878 if self.sql(expression, "this"): - 879 self.unsupported("Hints are not supported") - 880 return "" - 881 - 882 def index_sql(self, expression: exp.Index) -> str: - 883 unique = "UNIQUE " if expression.args.get("unique") else "" - 884 primary = "PRIMARY " if expression.args.get("primary") else "" - 885 amp = "AMP " if expression.args.get("amp") else "" - 886 name = f"{expression.name} " if expression.name else "" - 887 table = self.sql(expression, "table") - 888 table = f"{self.INDEX_ON} {table} " if table else "" - 889 index = "INDEX " if not table else "" - 890 columns = self.expressions(expression, key="columns", flat=True) - 891 partition_by = self.expressions(expression, key="partition_by", flat=True) - 892 partition_by = f" PARTITION BY {partition_by}" if partition_by else "" - 893 return f"{unique}{primary}{amp}{index}{name}{table}({columns}){partition_by}" - 894 - 895 def identifier_sql(self, expression: exp.Identifier) -> str: - 896 text = expression.name - 897 lower = text.lower() - 898 text = lower if self.normalize and not expression.quoted else text - 899 text = text.replace(self.identifier_end, self._escaped_identifier_end) - 900 if ( - 901 expression.quoted - 902 or should_identify(text, self.identify) - 903 or lower in self.RESERVED_KEYWORDS - 904 or (not self.identifiers_can_start_with_digit and text[:1].isdigit()) - 905 ): - 906 text = f"{self.identifier_start}{text}{self.identifier_end}" - 907 return text - 908 - 909 def inputoutputformat_sql(self, expression: exp.InputOutputFormat) -> str: - 910 input_format = self.sql(expression, "input_format") - 911 input_format = f"INPUTFORMAT {input_format}" if input_format else "" - 912 output_format = self.sql(expression, "output_format") - 913 output_format = f"OUTPUTFORMAT {output_format}" if output_format else "" - 914 return self.sep().join((input_format, output_format)) - 915 - 916 def national_sql(self, expression: exp.National, prefix: str = "N") -> str: - 917 string = self.sql(exp.Literal.string(expression.name)) - 918 return f"{prefix}{string}" - 919 - 920 def partition_sql(self, expression: exp.Partition) -> str: - 921 return f"PARTITION({self.expressions(expression)})" - 922 - 923 def properties_sql(self, expression: exp.Properties) -> str: - 924 root_properties = [] - 925 with_properties = 
[] + 853 def filter_sql(self, expression: exp.Filter) -> str: + 854 this = self.sql(expression, "this") + 855 where = self.sql(expression, "expression")[1:] # where has a leading space + 856 return f"{this} FILTER({where})" + 857 + 858 def hint_sql(self, expression: exp.Hint) -> str: + 859 if self.sql(expression, "this"): + 860 self.unsupported("Hints are not supported") + 861 return "" + 862 + 863 def index_sql(self, expression: exp.Index) -> str: + 864 unique = "UNIQUE " if expression.args.get("unique") else "" + 865 primary = "PRIMARY " if expression.args.get("primary") else "" + 866 amp = "AMP " if expression.args.get("amp") else "" + 867 name = f"{expression.name} " if expression.name else "" + 868 table = self.sql(expression, "table") + 869 table = f"{self.INDEX_ON} {table} " if table else "" + 870 using = self.sql(expression, "using") + 871 using = f"USING {using} " if using else "" + 872 index = "INDEX " if not table else "" + 873 columns = self.expressions(expression, key="columns", flat=True) + 874 columns = f"({columns})" if columns else "" + 875 partition_by = self.expressions(expression, key="partition_by", flat=True) + 876 partition_by = f" PARTITION BY {partition_by}" if partition_by else "" + 877 return f"{unique}{primary}{amp}{index}{name}{table}{using}{columns}{partition_by}" + 878 + 879 def identifier_sql(self, expression: exp.Identifier) -> str: + 880 text = expression.name + 881 lower = text.lower() + 882 text = lower if self.normalize and not expression.quoted else text + 883 text = text.replace(self.IDENTIFIER_END, self._escaped_identifier_end) + 884 if ( + 885 expression.quoted + 886 or should_identify(text, self.identify) + 887 or lower in self.RESERVED_KEYWORDS + 888 or (not self.IDENTIFIERS_CAN_START_WITH_DIGIT and text[:1].isdigit()) + 889 ): + 890 text = f"{self.IDENTIFIER_START}{text}{self.IDENTIFIER_END}" + 891 return text + 892 + 893 def inputoutputformat_sql(self, expression: exp.InputOutputFormat) -> str: + 894 input_format = self.sql(expression, "input_format") + 895 input_format = f"INPUTFORMAT {input_format}" if input_format else "" + 896 output_format = self.sql(expression, "output_format") + 897 output_format = f"OUTPUTFORMAT {output_format}" if output_format else "" + 898 return self.sep().join((input_format, output_format)) + 899 + 900 def national_sql(self, expression: exp.National, prefix: str = "N") -> str: + 901 string = self.sql(exp.Literal.string(expression.name)) + 902 return f"{prefix}{string}" + 903 + 904 def partition_sql(self, expression: exp.Partition) -> str: + 905 return f"PARTITION({self.expressions(expression)})" + 906 + 907 def properties_sql(self, expression: exp.Properties) -> str: + 908 root_properties = [] + 909 with_properties = [] + 910 + 911 for p in expression.expressions: + 912 p_loc = self.PROPERTIES_LOCATION[p.__class__] + 913 if p_loc == exp.Properties.Location.POST_WITH: + 914 with_properties.append(p) + 915 elif p_loc == exp.Properties.Location.POST_SCHEMA: + 916 root_properties.append(p) + 917 + 918 return self.root_properties( + 919 exp.Properties(expressions=root_properties) + 920 ) + self.with_properties(exp.Properties(expressions=with_properties)) + 921 + 922 def root_properties(self, properties: exp.Properties) -> str: + 923 if properties.expressions: + 924 return self.sep() + self.expressions(properties, indent=False, sep=" ") + 925 return "" 926 - 927 for p in expression.expressions: - 928 p_loc = self.PROPERTIES_LOCATION[p.__class__] - 929 if p_loc == exp.Properties.Location.POST_WITH: - 930 
with_properties.append(p) - 931 elif p_loc == exp.Properties.Location.POST_SCHEMA: - 932 root_properties.append(p) - 933 - 934 return self.root_properties( - 935 exp.Properties(expressions=root_properties) - 936 ) + self.with_properties(exp.Properties(expressions=with_properties)) - 937 - 938 def root_properties(self, properties: exp.Properties) -> str: - 939 if properties.expressions: - 940 return self.sep() + self.expressions(properties, indent=False, sep=" ") - 941 return "" - 942 - 943 def properties( - 944 self, - 945 properties: exp.Properties, - 946 prefix: str = "", - 947 sep: str = ", ", - 948 suffix: str = "", - 949 wrapped: bool = True, - 950 ) -> str: - 951 if properties.expressions: - 952 expressions = self.expressions(properties, sep=sep, indent=False) - 953 expressions = self.wrap(expressions) if wrapped else expressions - 954 return f"{prefix}{' ' if prefix and prefix != ' ' else ''}{expressions}{suffix}" - 955 return "" - 956 - 957 def with_properties(self, properties: exp.Properties) -> str: - 958 return self.properties(properties, prefix=self.seg("WITH")) - 959 - 960 def locate_properties( - 961 self, properties: exp.Properties - 962 ) -> t.Dict[exp.Properties.Location, list[exp.Property]]: - 963 properties_locs: t.Dict[exp.Properties.Location, list[exp.Property]] = { - 964 key: [] for key in exp.Properties.Location - 965 } - 966 - 967 for p in properties.expressions: - 968 p_loc = self.PROPERTIES_LOCATION[p.__class__] - 969 if p_loc == exp.Properties.Location.POST_NAME: - 970 properties_locs[exp.Properties.Location.POST_NAME].append(p) - 971 elif p_loc == exp.Properties.Location.POST_INDEX: - 972 properties_locs[exp.Properties.Location.POST_INDEX].append(p) - 973 elif p_loc == exp.Properties.Location.POST_SCHEMA: - 974 properties_locs[exp.Properties.Location.POST_SCHEMA].append(p) - 975 elif p_loc == exp.Properties.Location.POST_WITH: - 976 properties_locs[exp.Properties.Location.POST_WITH].append(p) - 977 elif p_loc == exp.Properties.Location.POST_CREATE: - 978 properties_locs[exp.Properties.Location.POST_CREATE].append(p) - 979 elif p_loc == exp.Properties.Location.POST_ALIAS: - 980 properties_locs[exp.Properties.Location.POST_ALIAS].append(p) - 981 elif p_loc == exp.Properties.Location.POST_EXPRESSION: - 982 properties_locs[exp.Properties.Location.POST_EXPRESSION].append(p) - 983 elif p_loc == exp.Properties.Location.UNSUPPORTED: - 984 self.unsupported(f"Unsupported property {p.key}") - 985 - 986 return properties_locs + 927 def properties( + 928 self, + 929 properties: exp.Properties, + 930 prefix: str = "", + 931 sep: str = ", ", + 932 suffix: str = "", + 933 wrapped: bool = True, + 934 ) -> str: + 935 if properties.expressions: + 936 expressions = self.expressions(properties, sep=sep, indent=False) + 937 expressions = self.wrap(expressions) if wrapped else expressions + 938 return f"{prefix}{' ' if prefix and prefix != ' ' else ''}{expressions}{suffix}" + 939 return "" + 940 + 941 def with_properties(self, properties: exp.Properties) -> str: + 942 return self.properties(properties, prefix=self.seg("WITH")) + 943 + 944 def locate_properties( + 945 self, properties: exp.Properties + 946 ) -> t.Dict[exp.Properties.Location, list[exp.Property]]: + 947 properties_locs: t.Dict[exp.Properties.Location, list[exp.Property]] = { + 948 key: [] for key in exp.Properties.Location + 949 } + 950 + 951 for p in properties.expressions: + 952 p_loc = self.PROPERTIES_LOCATION[p.__class__] + 953 if p_loc == exp.Properties.Location.POST_NAME: + 954 
properties_locs[exp.Properties.Location.POST_NAME].append(p) + 955 elif p_loc == exp.Properties.Location.POST_INDEX: + 956 properties_locs[exp.Properties.Location.POST_INDEX].append(p) + 957 elif p_loc == exp.Properties.Location.POST_SCHEMA: + 958 properties_locs[exp.Properties.Location.POST_SCHEMA].append(p) + 959 elif p_loc == exp.Properties.Location.POST_WITH: + 960 properties_locs[exp.Properties.Location.POST_WITH].append(p) + 961 elif p_loc == exp.Properties.Location.POST_CREATE: + 962 properties_locs[exp.Properties.Location.POST_CREATE].append(p) + 963 elif p_loc == exp.Properties.Location.POST_ALIAS: + 964 properties_locs[exp.Properties.Location.POST_ALIAS].append(p) + 965 elif p_loc == exp.Properties.Location.POST_EXPRESSION: + 966 properties_locs[exp.Properties.Location.POST_EXPRESSION].append(p) + 967 elif p_loc == exp.Properties.Location.UNSUPPORTED: + 968 self.unsupported(f"Unsupported property {p.key}") + 969 + 970 return properties_locs + 971 + 972 def property_sql(self, expression: exp.Property) -> str: + 973 property_cls = expression.__class__ + 974 if property_cls == exp.Property: + 975 return f"{expression.name}={self.sql(expression, 'value')}" + 976 + 977 property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls) + 978 if not property_name: + 979 self.unsupported(f"Unsupported property {expression.key}") + 980 + 981 return f"{property_name}={self.sql(expression, 'this')}" + 982 + 983 def likeproperty_sql(self, expression: exp.LikeProperty) -> str: + 984 options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions) + 985 options = f" {options}" if options else "" + 986 return f"LIKE {self.sql(expression, 'this')}{options}" 987 - 988 def property_sql(self, expression: exp.Property) -> str: - 989 property_cls = expression.__class__ - 990 if property_cls == exp.Property: - 991 return f"{expression.name}={self.sql(expression, 'value')}" + 988 def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str: + 989 no = "NO " if expression.args.get("no") else "" + 990 protection = " PROTECTION" if expression.args.get("protection") else "" + 991 return f"{no}FALLBACK{protection}" 992 - 993 property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls) - 994 if not property_name: - 995 self.unsupported(f"Unsupported property {expression.key}") - 996 - 997 return f"{property_name}={self.sql(expression, 'this')}" - 998 - 999 def likeproperty_sql(self, expression: exp.LikeProperty) -> str: -1000 options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions) -1001 options = f" {options}" if options else "" -1002 return f"LIKE {self.sql(expression, 'this')}{options}" -1003 -1004 def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str: -1005 no = "NO " if expression.args.get("no") else "" -1006 protection = " PROTECTION" if expression.args.get("protection") else "" -1007 return f"{no}FALLBACK{protection}" -1008 -1009 def journalproperty_sql(self, expression: exp.JournalProperty) -> str: -1010 no = "NO " if expression.args.get("no") else "" -1011 local = expression.args.get("local") -1012 local = f"{local} " if local else "" -1013 dual = "DUAL " if expression.args.get("dual") else "" -1014 before = "BEFORE " if expression.args.get("before") else "" -1015 after = "AFTER " if expression.args.get("after") else "" -1016 return f"{no}{local}{dual}{before}{after}JOURNAL" -1017 -1018 def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str: -1019 freespace = self.sql(expression, "this") -1020 
percent = " PERCENT" if expression.args.get("percent") else "" -1021 return f"FREESPACE={freespace}{percent}" -1022 -1023 def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str: -1024 if expression.args.get("default"): -1025 property = "DEFAULT" -1026 elif expression.args.get("on"): -1027 property = "ON" -1028 else: -1029 property = "OFF" -1030 return f"CHECKSUM={property}" -1031 -1032 def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str: -1033 if expression.args.get("no"): -1034 return "NO MERGEBLOCKRATIO" -1035 if expression.args.get("default"): -1036 return "DEFAULT MERGEBLOCKRATIO" -1037 -1038 percent = " PERCENT" if expression.args.get("percent") else "" -1039 return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}" + 993 def journalproperty_sql(self, expression: exp.JournalProperty) -> str: + 994 no = "NO " if expression.args.get("no") else "" + 995 local = expression.args.get("local") + 996 local = f"{local} " if local else "" + 997 dual = "DUAL " if expression.args.get("dual") else "" + 998 before = "BEFORE " if expression.args.get("before") else "" + 999 after = "AFTER " if expression.args.get("after") else "" +1000 return f"{no}{local}{dual}{before}{after}JOURNAL" +1001 +1002 def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str: +1003 freespace = self.sql(expression, "this") +1004 percent = " PERCENT" if expression.args.get("percent") else "" +1005 return f"FREESPACE={freespace}{percent}" +1006 +1007 def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str: +1008 if expression.args.get("default"): +1009 property = "DEFAULT" +1010 elif expression.args.get("on"): +1011 property = "ON" +1012 else: +1013 property = "OFF" +1014 return f"CHECKSUM={property}" +1015 +1016 def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str: +1017 if expression.args.get("no"): +1018 return "NO MERGEBLOCKRATIO" +1019 if expression.args.get("default"): +1020 return "DEFAULT MERGEBLOCKRATIO" +1021 +1022 percent = " PERCENT" if expression.args.get("percent") else "" +1023 return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}" +1024 +1025 def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str: +1026 default = expression.args.get("default") +1027 minimum = expression.args.get("minimum") +1028 maximum = expression.args.get("maximum") +1029 if default or minimum or maximum: +1030 if default: +1031 prop = "DEFAULT" +1032 elif minimum: +1033 prop = "MINIMUM" +1034 else: +1035 prop = "MAXIMUM" +1036 return f"{prop} DATABLOCKSIZE" +1037 units = expression.args.get("units") +1038 units = f" {units}" if units else "" +1039 return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}" 1040 -1041 def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str: -1042 default = expression.args.get("default") -1043 minimum = expression.args.get("minimum") -1044 maximum = expression.args.get("maximum") -1045 if default or minimum or maximum: -1046 if default: -1047 prop = "DEFAULT" -1048 elif minimum: -1049 prop = "MINIMUM" -1050 else: -1051 prop = "MAXIMUM" -1052 return f"{prop} DATABLOCKSIZE" -1053 units = expression.args.get("units") -1054 units = f" {units}" if units else "" -1055 return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}" -1056 -1057 def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str: -1058 autotemp = expression.args.get("autotemp") -1059 always = expression.args.get("always") -1060 
default = expression.args.get("default") -1061 manual = expression.args.get("manual") -1062 never = expression.args.get("never") -1063 -1064 if autotemp is not None: -1065 prop = f"AUTOTEMP({self.expressions(autotemp)})" -1066 elif always: -1067 prop = "ALWAYS" -1068 elif default: -1069 prop = "DEFAULT" -1070 elif manual: -1071 prop = "MANUAL" -1072 elif never: -1073 prop = "NEVER" -1074 return f"BLOCKCOMPRESSION={prop}" -1075 -1076 def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str: -1077 no = expression.args.get("no") -1078 no = " NO" if no else "" -1079 concurrent = expression.args.get("concurrent") -1080 concurrent = " CONCURRENT" if concurrent else "" -1081 -1082 for_ = "" -1083 if expression.args.get("for_all"): -1084 for_ = " FOR ALL" -1085 elif expression.args.get("for_insert"): -1086 for_ = " FOR INSERT" -1087 elif expression.args.get("for_none"): -1088 for_ = " FOR NONE" -1089 return f"WITH{no}{concurrent} ISOLATED LOADING{for_}" +1041 def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str: +1042 autotemp = expression.args.get("autotemp") +1043 always = expression.args.get("always") +1044 default = expression.args.get("default") +1045 manual = expression.args.get("manual") +1046 never = expression.args.get("never") +1047 +1048 if autotemp is not None: +1049 prop = f"AUTOTEMP({self.expressions(autotemp)})" +1050 elif always: +1051 prop = "ALWAYS" +1052 elif default: +1053 prop = "DEFAULT" +1054 elif manual: +1055 prop = "MANUAL" +1056 elif never: +1057 prop = "NEVER" +1058 return f"BLOCKCOMPRESSION={prop}" +1059 +1060 def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str: +1061 no = expression.args.get("no") +1062 no = " NO" if no else "" +1063 concurrent = expression.args.get("concurrent") +1064 concurrent = " CONCURRENT" if concurrent else "" +1065 +1066 for_ = "" +1067 if expression.args.get("for_all"): +1068 for_ = " FOR ALL" +1069 elif expression.args.get("for_insert"): +1070 for_ = " FOR INSERT" +1071 elif expression.args.get("for_none"): +1072 for_ = " FOR NONE" +1073 return f"WITH{no}{concurrent} ISOLATED LOADING{for_}" +1074 +1075 def lockingproperty_sql(self, expression: exp.LockingProperty) -> str: +1076 kind = expression.args.get("kind") +1077 this = f" {self.sql(expression, 'this')}" if expression.this else "" +1078 for_or_in = expression.args.get("for_or_in") +1079 lock_type = expression.args.get("lock_type") +1080 override = " OVERRIDE" if expression.args.get("override") else "" +1081 return f"LOCKING {kind}{this} {for_or_in} {lock_type}{override}" +1082 +1083 def withdataproperty_sql(self, expression: exp.WithDataProperty) -> str: +1084 data_sql = f"WITH {'NO ' if expression.args.get('no') else ''}DATA" +1085 statistics = expression.args.get("statistics") +1086 statistics_sql = "" +1087 if statistics is not None: +1088 statistics_sql = f" AND {'NO ' if not statistics else ''}STATISTICS" +1089 return f"{data_sql}{statistics_sql}" 1090 -1091 def lockingproperty_sql(self, expression: exp.LockingProperty) -> str: -1092 kind = expression.args.get("kind") -1093 this = f" {self.sql(expression, 'this')}" if expression.this else "" -1094 for_or_in = expression.args.get("for_or_in") -1095 lock_type = expression.args.get("lock_type") -1096 override = " OVERRIDE" if expression.args.get("override") else "" -1097 return f"LOCKING {kind}{this} {for_or_in} {lock_type}{override}" +1091 def insert_sql(self, expression: exp.Insert) -> str: +1092 overwrite = 
expression.args.get("overwrite") +1093 +1094 if isinstance(expression.this, exp.Directory): +1095 this = "OVERWRITE " if overwrite else "INTO " +1096 else: +1097 this = "OVERWRITE TABLE " if overwrite else "INTO " 1098 -1099 def withdataproperty_sql(self, expression: exp.WithDataProperty) -> str: -1100 data_sql = f"WITH {'NO ' if expression.args.get('no') else ''}DATA" -1101 statistics = expression.args.get("statistics") -1102 statistics_sql = "" -1103 if statistics is not None: -1104 statistics_sql = f" AND {'NO ' if not statistics else ''}STATISTICS" -1105 return f"{data_sql}{statistics_sql}" -1106 -1107 def insert_sql(self, expression: exp.Insert) -> str: -1108 overwrite = expression.args.get("overwrite") -1109 -1110 if isinstance(expression.this, exp.Directory): -1111 this = "OVERWRITE " if overwrite else "INTO " -1112 else: -1113 this = "OVERWRITE TABLE " if overwrite else "INTO " -1114 -1115 alternative = expression.args.get("alternative") -1116 alternative = f" OR {alternative} " if alternative else " " -1117 this = f"{this}{self.sql(expression, 'this')}" -1118 -1119 exists = " IF EXISTS " if expression.args.get("exists") else " " -1120 partition_sql = ( -1121 self.sql(expression, "partition") if expression.args.get("partition") else "" -1122 ) -1123 expression_sql = self.sql(expression, "expression") -1124 conflict = self.sql(expression, "conflict") -1125 returning = self.sql(expression, "returning") -1126 sep = self.sep() if partition_sql else "" -1127 sql = f"INSERT{alternative}{this}{exists}{partition_sql}{sep}{expression_sql}{conflict}{returning}" -1128 return self.prepend_ctes(expression, sql) -1129 -1130 def intersect_sql(self, expression: exp.Intersect) -> str: -1131 return self.prepend_ctes( -1132 expression, -1133 self.set_operation(expression, self.intersect_op(expression)), -1134 ) -1135 -1136 def intersect_op(self, expression: exp.Intersect) -> str: -1137 return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}" -1138 -1139 def introducer_sql(self, expression: exp.Introducer) -> str: -1140 return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}" +1099 alternative = expression.args.get("alternative") +1100 alternative = f" OR {alternative} " if alternative else " " +1101 this = f"{this}{self.sql(expression, 'this')}" +1102 +1103 exists = " IF EXISTS " if expression.args.get("exists") else " " +1104 partition_sql = ( +1105 self.sql(expression, "partition") if expression.args.get("partition") else "" +1106 ) +1107 expression_sql = self.sql(expression, "expression") +1108 conflict = self.sql(expression, "conflict") +1109 returning = self.sql(expression, "returning") +1110 sep = self.sep() if partition_sql else "" +1111 sql = f"INSERT{alternative}{this}{exists}{partition_sql}{sep}{expression_sql}{conflict}{returning}" +1112 return self.prepend_ctes(expression, sql) +1113 +1114 def intersect_sql(self, expression: exp.Intersect) -> str: +1115 return self.prepend_ctes( +1116 expression, +1117 self.set_operation(expression, self.intersect_op(expression)), +1118 ) +1119 +1120 def intersect_op(self, expression: exp.Intersect) -> str: +1121 return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}" +1122 +1123 def introducer_sql(self, expression: exp.Introducer) -> str: +1124 return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}" +1125 +1126 def pseudotype_sql(self, expression: exp.PseudoType) -> str: +1127 return expression.name.upper() +1128 +1129 def onconflict_sql(self, expression: exp.OnConflict) -> str: 
+1130 conflict = "ON DUPLICATE KEY" if expression.args.get("duplicate") else "ON CONFLICT" +1131 constraint = self.sql(expression, "constraint") +1132 if constraint: +1133 constraint = f"ON CONSTRAINT {constraint}" +1134 key = self.expressions(expression, key="key", flat=True) +1135 do = "" if expression.args.get("duplicate") else " DO " +1136 nothing = "NOTHING" if expression.args.get("nothing") else "" +1137 expressions = self.expressions(expression, flat=True) +1138 if expressions: +1139 expressions = f"UPDATE SET {expressions}" +1140 return f"{self.seg(conflict)} {constraint}{key}{do}{nothing}{expressions}" 1141 -1142 def pseudotype_sql(self, expression: exp.PseudoType) -> str: -1143 return expression.name.upper() +1142 def returning_sql(self, expression: exp.Returning) -> str: +1143 return f"{self.seg('RETURNING')} {self.expressions(expression, flat=True)}" 1144 -1145 def onconflict_sql(self, expression: exp.OnConflict) -> str: -1146 conflict = "ON DUPLICATE KEY" if expression.args.get("duplicate") else "ON CONFLICT" -1147 constraint = self.sql(expression, "constraint") -1148 if constraint: -1149 constraint = f"ON CONSTRAINT {constraint}" -1150 key = self.expressions(expression, key="key", flat=True) -1151 do = "" if expression.args.get("duplicate") else " DO " -1152 nothing = "NOTHING" if expression.args.get("nothing") else "" -1153 expressions = self.expressions(expression, flat=True) -1154 if expressions: -1155 expressions = f"UPDATE SET {expressions}" -1156 return f"{self.seg(conflict)} {constraint}{key}{do}{nothing}{expressions}" -1157 -1158 def returning_sql(self, expression: exp.Returning) -> str: -1159 return f"{self.seg('RETURNING')} {self.expressions(expression, flat=True)}" -1160 -1161 def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str: -1162 fields = expression.args.get("fields") -1163 fields = f" FIELDS TERMINATED BY {fields}" if fields else "" -1164 escaped = expression.args.get("escaped") -1165 escaped = f" ESCAPED BY {escaped}" if escaped else "" -1166 items = expression.args.get("collection_items") -1167 items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else "" -1168 keys = expression.args.get("map_keys") -1169 keys = f" MAP KEYS TERMINATED BY {keys}" if keys else "" -1170 lines = expression.args.get("lines") -1171 lines = f" LINES TERMINATED BY {lines}" if lines else "" -1172 null = expression.args.get("null") -1173 null = f" NULL DEFINED AS {null}" if null else "" -1174 return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}" -1175 -1176 def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str: -1177 table = ".".join( -1178 part -1179 for part in [ -1180 self.sql(expression, "catalog"), -1181 self.sql(expression, "db"), -1182 self.sql(expression, "this"), -1183 ] -1184 if part -1185 ) -1186 -1187 alias = self.sql(expression, "alias") -1188 alias = f"{sep}{alias}" if alias else "" -1189 hints = self.expressions(expression, key="hints", flat=True) -1190 hints = f" WITH ({hints})" if hints and self.TABLE_HINTS else "" -1191 pivots = self.expressions(expression, key="pivots", sep=" ", flat=True) -1192 pivots = f" {pivots}" if pivots else "" -1193 joins = self.expressions(expression, key="joins", sep="") -1194 laterals = self.expressions(expression, key="laterals", sep="") -1195 system_time = expression.args.get("system_time") -1196 system_time = f" {self.sql(expression, 'system_time')}" if system_time else "" -1197 -1198 return f"{table}{system_time}{alias}{hints}{pivots}{joins}{laterals}" 
-1199 -1200 def tablesample_sql( -1201 self, expression: exp.TableSample, seed_prefix: str = "SEED", sep=" AS " -1202 ) -> str: -1203 if self.alias_post_tablesample and expression.this.alias: -1204 table = expression.this.copy() -1205 table.set("alias", None) -1206 this = self.sql(table) -1207 alias = f"{sep}{self.sql(expression.this, 'alias')}" -1208 else: -1209 this = self.sql(expression, "this") -1210 alias = "" -1211 method = self.sql(expression, "method") -1212 method = f"{method.upper()} " if method and self.TABLESAMPLE_WITH_METHOD else "" -1213 numerator = self.sql(expression, "bucket_numerator") -1214 denominator = self.sql(expression, "bucket_denominator") -1215 field = self.sql(expression, "bucket_field") -1216 field = f" ON {field}" if field else "" -1217 bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else "" -1218 percent = self.sql(expression, "percent") -1219 percent = f"{percent} PERCENT" if percent else "" -1220 rows = self.sql(expression, "rows") -1221 rows = f"{rows} ROWS" if rows else "" -1222 size = self.sql(expression, "size") -1223 if size and self.TABLESAMPLE_SIZE_IS_PERCENT: -1224 size = f"{size} PERCENT" -1225 seed = self.sql(expression, "seed") -1226 seed = f" {seed_prefix} ({seed})" if seed else "" -1227 kind = expression.args.get("kind", "TABLESAMPLE") -1228 return f"{this} {kind} {method}({bucket}{percent}{rows}{size}){seed}{alias}" -1229 -1230 def pivot_sql(self, expression: exp.Pivot) -> str: -1231 expressions = self.expressions(expression, flat=True) -1232 -1233 if expression.this: -1234 this = self.sql(expression, "this") -1235 on = f"{self.seg('ON')} {expressions}" -1236 using = self.expressions(expression, key="using", flat=True) -1237 using = f"{self.seg('USING')} {using}" if using else "" -1238 group = self.sql(expression, "group") -1239 return f"PIVOT {this}{on}{using}{group}" -1240 -1241 alias = self.sql(expression, "alias") -1242 alias = f" AS {alias}" if alias else "" -1243 unpivot = expression.args.get("unpivot") -1244 direction = "UNPIVOT" if unpivot else "PIVOT" -1245 field = self.sql(expression, "field") -1246 return f"{direction}({expressions} FOR {field}){alias}" -1247 -1248 def tuple_sql(self, expression: exp.Tuple) -> str: -1249 return f"({self.expressions(expression, flat=True)})" -1250 -1251 def update_sql(self, expression: exp.Update) -> str: -1252 this = self.sql(expression, "this") -1253 set_sql = self.expressions(expression, flat=True) -1254 from_sql = self.sql(expression, "from") -1255 where_sql = self.sql(expression, "where") -1256 returning = self.sql(expression, "returning") -1257 sql = f"UPDATE {this} SET {set_sql}{from_sql}{where_sql}{returning}" -1258 return self.prepend_ctes(expression, sql) -1259 -1260 def values_sql(self, expression: exp.Values) -> str: -1261 args = self.expressions(expression) -1262 alias = self.sql(expression, "alias") -1263 values = f"VALUES{self.seg('')}{args}" -1264 values = ( -1265 f"({values})" -1266 if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From)) -1267 else values -1268 ) -1269 return f"{values} AS {alias}" if alias else values -1270 -1271 def var_sql(self, expression: exp.Var) -> str: -1272 return self.sql(expression, "this") -1273 -1274 def into_sql(self, expression: exp.Into) -> str: -1275 temporary = " TEMPORARY" if expression.args.get("temporary") else "" -1276 unlogged = " UNLOGGED" if expression.args.get("unlogged") else "" -1277 return f"{self.seg('INTO')}{temporary or unlogged} {self.sql(expression, 'this')}" -1278 -1279 def 
from_sql(self, expression: exp.From) -> str: -1280 return f"{self.seg('FROM')} {self.sql(expression, 'this')}" -1281 -1282 def group_sql(self, expression: exp.Group) -> str: -1283 group_by = self.op_expressions("GROUP BY", expression) -1284 grouping_sets = self.expressions(expression, key="grouping_sets", indent=False) -1285 grouping_sets = ( -1286 f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else "" -1287 ) -1288 -1289 cube = expression.args.get("cube", []) -1290 if seq_get(cube, 0) is True: -1291 return f"{group_by}{self.seg('WITH CUBE')}" -1292 else: -1293 cube_sql = self.expressions(expression, key="cube", indent=False) -1294 cube_sql = f"{self.seg('CUBE')} {self.wrap(cube_sql)}" if cube_sql else "" -1295 -1296 rollup = expression.args.get("rollup", []) -1297 if seq_get(rollup, 0) is True: -1298 return f"{group_by}{self.seg('WITH ROLLUP')}" -1299 else: -1300 rollup_sql = self.expressions(expression, key="rollup", indent=False) -1301 rollup_sql = f"{self.seg('ROLLUP')} {self.wrap(rollup_sql)}" if rollup_sql else "" -1302 -1303 groupings = csv( -1304 grouping_sets, -1305 cube_sql, -1306 rollup_sql, -1307 self.seg("WITH TOTALS") if expression.args.get("totals") else "", -1308 sep=self.GROUPINGS_SEP, -1309 ) -1310 -1311 if expression.args.get("expressions") and groupings: -1312 group_by = f"{group_by}{self.GROUPINGS_SEP}" -1313 -1314 return f"{group_by}{groupings}" -1315 -1316 def having_sql(self, expression: exp.Having) -> str: -1317 this = self.indent(self.sql(expression, "this")) -1318 return f"{self.seg('HAVING')}{self.sep()}{this}" -1319 -1320 def join_sql(self, expression: exp.Join) -> str: -1321 op_sql = " ".join( -1322 op -1323 for op in ( -1324 expression.method, -1325 "GLOBAL" if expression.args.get("global") else None, -1326 expression.side, -1327 expression.kind, -1328 expression.hint if self.JOIN_HINTS else None, -1329 ) -1330 if op -1331 ) -1332 on_sql = self.sql(expression, "on") -1333 using = expression.args.get("using") -1334 -1335 if not on_sql and using: -1336 on_sql = csv(*(self.sql(column) for column in using)) -1337 -1338 this_sql = self.sql(expression, "this") -1339 -1340 if on_sql: -1341 on_sql = self.indent(on_sql, skip_first=True) -1342 space = self.seg(" " * self.pad) if self.pretty else " " -1343 if using: -1344 on_sql = f"{space}USING ({on_sql})" -1345 else: -1346 on_sql = f"{space}ON {on_sql}" -1347 elif not op_sql: -1348 return f", {this_sql}" -1349 -1350 op_sql = f"{op_sql} JOIN" if op_sql else "JOIN" -1351 return f"{self.seg(op_sql)} {this_sql}{on_sql}" -1352 -1353 def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str: -1354 args = self.expressions(expression, flat=True) -1355 args = f"({args})" if len(args.split(",")) > 1 else args -1356 return f"{args} {arrow_sep} {self.sql(expression, 'this')}" -1357 -1358 def lateral_sql(self, expression: exp.Lateral) -> str: -1359 this = self.sql(expression, "this") -1360 -1361 if isinstance(expression.this, exp.Subquery): -1362 return f"LATERAL {this}" -1363 -1364 if expression.args.get("view"): -1365 alias = expression.args["alias"] -1366 columns = self.expressions(alias, key="columns", flat=True) -1367 table = f" {alias.name}" if alias.name else "" -1368 columns = f" AS {columns}" if columns else "" -1369 op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}") -1370 return f"{op_sql}{self.sep()}{this}{table}{columns}" +1145 def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str: +1146 fields = 
expression.args.get("fields") +1147 fields = f" FIELDS TERMINATED BY {fields}" if fields else "" +1148 escaped = expression.args.get("escaped") +1149 escaped = f" ESCAPED BY {escaped}" if escaped else "" +1150 items = expression.args.get("collection_items") +1151 items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else "" +1152 keys = expression.args.get("map_keys") +1153 keys = f" MAP KEYS TERMINATED BY {keys}" if keys else "" +1154 lines = expression.args.get("lines") +1155 lines = f" LINES TERMINATED BY {lines}" if lines else "" +1156 null = expression.args.get("null") +1157 null = f" NULL DEFINED AS {null}" if null else "" +1158 return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}" +1159 +1160 def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str: +1161 table = ".".join( +1162 part +1163 for part in [ +1164 self.sql(expression, "catalog"), +1165 self.sql(expression, "db"), +1166 self.sql(expression, "this"), +1167 ] +1168 if part +1169 ) +1170 +1171 alias = self.sql(expression, "alias") +1172 alias = f"{sep}{alias}" if alias else "" +1173 hints = self.expressions(expression, key="hints", flat=True) +1174 hints = f" WITH ({hints})" if hints and self.TABLE_HINTS else "" +1175 pivots = self.expressions(expression, key="pivots", sep=" ", flat=True) +1176 pivots = f" {pivots}" if pivots else "" +1177 joins = self.expressions(expression, key="joins", sep="") +1178 laterals = self.expressions(expression, key="laterals", sep="") +1179 system_time = expression.args.get("system_time") +1180 system_time = f" {self.sql(expression, 'system_time')}" if system_time else "" +1181 +1182 return f"{table}{system_time}{alias}{hints}{pivots}{joins}{laterals}" +1183 +1184 def tablesample_sql( +1185 self, expression: exp.TableSample, seed_prefix: str = "SEED", sep=" AS " +1186 ) -> str: +1187 if self.ALIAS_POST_TABLESAMPLE and expression.this.alias: +1188 table = expression.this.copy() +1189 table.set("alias", None) +1190 this = self.sql(table) +1191 alias = f"{sep}{self.sql(expression.this, 'alias')}" +1192 else: +1193 this = self.sql(expression, "this") +1194 alias = "" +1195 method = self.sql(expression, "method") +1196 method = f"{method.upper()} " if method and self.TABLESAMPLE_WITH_METHOD else "" +1197 numerator = self.sql(expression, "bucket_numerator") +1198 denominator = self.sql(expression, "bucket_denominator") +1199 field = self.sql(expression, "bucket_field") +1200 field = f" ON {field}" if field else "" +1201 bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else "" +1202 percent = self.sql(expression, "percent") +1203 percent = f"{percent} PERCENT" if percent else "" +1204 rows = self.sql(expression, "rows") +1205 rows = f"{rows} ROWS" if rows else "" +1206 size = self.sql(expression, "size") +1207 if size and self.TABLESAMPLE_SIZE_IS_PERCENT: +1208 size = f"{size} PERCENT" +1209 seed = self.sql(expression, "seed") +1210 seed = f" {seed_prefix} ({seed})" if seed else "" +1211 kind = expression.args.get("kind", "TABLESAMPLE") +1212 return f"{this} {kind} {method}({bucket}{percent}{rows}{size}){seed}{alias}" +1213 +1214 def pivot_sql(self, expression: exp.Pivot) -> str: +1215 expressions = self.expressions(expression, flat=True) +1216 +1217 if expression.this: +1218 this = self.sql(expression, "this") +1219 on = f"{self.seg('ON')} {expressions}" +1220 using = self.expressions(expression, key="using", flat=True) +1221 using = f"{self.seg('USING')} {using}" if using else "" +1222 group = self.sql(expression, "group") +1223 return 
f"PIVOT {this}{on}{using}{group}" +1224 +1225 alias = self.sql(expression, "alias") +1226 alias = f" AS {alias}" if alias else "" +1227 unpivot = expression.args.get("unpivot") +1228 direction = "UNPIVOT" if unpivot else "PIVOT" +1229 field = self.sql(expression, "field") +1230 return f"{direction}({expressions} FOR {field}){alias}" +1231 +1232 def tuple_sql(self, expression: exp.Tuple) -> str: +1233 return f"({self.expressions(expression, flat=True)})" +1234 +1235 def update_sql(self, expression: exp.Update) -> str: +1236 this = self.sql(expression, "this") +1237 set_sql = self.expressions(expression, flat=True) +1238 from_sql = self.sql(expression, "from") +1239 where_sql = self.sql(expression, "where") +1240 returning = self.sql(expression, "returning") +1241 sql = f"UPDATE {this} SET {set_sql}{from_sql}{where_sql}{returning}" +1242 return self.prepend_ctes(expression, sql) +1243 +1244 def values_sql(self, expression: exp.Values) -> str: +1245 args = self.expressions(expression) +1246 alias = self.sql(expression, "alias") +1247 values = f"VALUES{self.seg('')}{args}" +1248 values = ( +1249 f"({values})" +1250 if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From)) +1251 else values +1252 ) +1253 return f"{values} AS {alias}" if alias else values +1254 +1255 def var_sql(self, expression: exp.Var) -> str: +1256 return self.sql(expression, "this") +1257 +1258 def into_sql(self, expression: exp.Into) -> str: +1259 temporary = " TEMPORARY" if expression.args.get("temporary") else "" +1260 unlogged = " UNLOGGED" if expression.args.get("unlogged") else "" +1261 return f"{self.seg('INTO')}{temporary or unlogged} {self.sql(expression, 'this')}" +1262 +1263 def from_sql(self, expression: exp.From) -> str: +1264 return f"{self.seg('FROM')} {self.sql(expression, 'this')}" +1265 +1266 def group_sql(self, expression: exp.Group) -> str: +1267 group_by = self.op_expressions("GROUP BY", expression) +1268 grouping_sets = self.expressions(expression, key="grouping_sets", indent=False) +1269 grouping_sets = ( +1270 f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else "" +1271 ) +1272 +1273 cube = expression.args.get("cube", []) +1274 if seq_get(cube, 0) is True: +1275 return f"{group_by}{self.seg('WITH CUBE')}" +1276 else: +1277 cube_sql = self.expressions(expression, key="cube", indent=False) +1278 cube_sql = f"{self.seg('CUBE')} {self.wrap(cube_sql)}" if cube_sql else "" +1279 +1280 rollup = expression.args.get("rollup", []) +1281 if seq_get(rollup, 0) is True: +1282 return f"{group_by}{self.seg('WITH ROLLUP')}" +1283 else: +1284 rollup_sql = self.expressions(expression, key="rollup", indent=False) +1285 rollup_sql = f"{self.seg('ROLLUP')} {self.wrap(rollup_sql)}" if rollup_sql else "" +1286 +1287 groupings = csv( +1288 grouping_sets, +1289 cube_sql, +1290 rollup_sql, +1291 self.seg("WITH TOTALS") if expression.args.get("totals") else "", +1292 sep=self.GROUPINGS_SEP, +1293 ) +1294 +1295 if expression.args.get("expressions") and groupings: +1296 group_by = f"{group_by}{self.GROUPINGS_SEP}" +1297 +1298 return f"{group_by}{groupings}" +1299 +1300 def having_sql(self, expression: exp.Having) -> str: +1301 this = self.indent(self.sql(expression, "this")) +1302 return f"{self.seg('HAVING')}{self.sep()}{this}" +1303 +1304 def join_sql(self, expression: exp.Join) -> str: +1305 op_sql = " ".join( +1306 op +1307 for op in ( +1308 expression.method, +1309 "GLOBAL" if expression.args.get("global") else None, +1310 expression.side, +1311 expression.kind, +1312 
expression.hint if self.JOIN_HINTS else None, +1313 ) +1314 if op +1315 ) +1316 on_sql = self.sql(expression, "on") +1317 using = expression.args.get("using") +1318 +1319 if not on_sql and using: +1320 on_sql = csv(*(self.sql(column) for column in using)) +1321 +1322 this_sql = self.sql(expression, "this") +1323 +1324 if on_sql: +1325 on_sql = self.indent(on_sql, skip_first=True) +1326 space = self.seg(" " * self.pad) if self.pretty else " " +1327 if using: +1328 on_sql = f"{space}USING ({on_sql})" +1329 else: +1330 on_sql = f"{space}ON {on_sql}" +1331 elif not op_sql: +1332 return f", {this_sql}" +1333 +1334 op_sql = f"{op_sql} JOIN" if op_sql else "JOIN" +1335 return f"{self.seg(op_sql)} {this_sql}{on_sql}" +1336 +1337 def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str: +1338 args = self.expressions(expression, flat=True) +1339 args = f"({args})" if len(args.split(",")) > 1 else args +1340 return f"{args} {arrow_sep} {self.sql(expression, 'this')}" +1341 +1342 def lateral_sql(self, expression: exp.Lateral) -> str: +1343 this = self.sql(expression, "this") +1344 +1345 if isinstance(expression.this, exp.Subquery): +1346 return f"LATERAL {this}" +1347 +1348 if expression.args.get("view"): +1349 alias = expression.args["alias"] +1350 columns = self.expressions(alias, key="columns", flat=True) +1351 table = f" {alias.name}" if alias.name else "" +1352 columns = f" AS {columns}" if columns else "" +1353 op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}") +1354 return f"{op_sql}{self.sep()}{this}{table}{columns}" +1355 +1356 alias = self.sql(expression, "alias") +1357 alias = f" AS {alias}" if alias else "" +1358 return f"LATERAL {this}{alias}" +1359 +1360 def limit_sql(self, expression: exp.Limit) -> str: +1361 this = self.sql(expression, "this") +1362 args = ", ".join( +1363 sql +1364 for sql in ( +1365 self.sql(expression, "offset"), +1366 self.sql(expression, "expression"), +1367 ) +1368 if sql +1369 ) +1370 return f"{this}{self.seg('LIMIT')} {args}" 1371 -1372 alias = self.sql(expression, "alias") -1373 alias = f" AS {alias}" if alias else "" -1374 return f"LATERAL {this}{alias}" +1372 def offset_sql(self, expression: exp.Offset) -> str: +1373 this = self.sql(expression, "this") +1374 return f"{this}{self.seg('OFFSET')} {self.sql(expression, 'expression')}" 1375 -1376 def limit_sql(self, expression: exp.Limit) -> str: -1377 this = self.sql(expression, "this") -1378 return f"{this}{self.seg('LIMIT')} {self.sql(expression, 'expression')}" -1379 -1380 def offset_sql(self, expression: exp.Offset) -> str: -1381 this = self.sql(expression, "this") -1382 return f"{this}{self.seg('OFFSET')} {self.sql(expression, 'expression')}" -1383 -1384 def setitem_sql(self, expression: exp.SetItem) -> str: -1385 kind = self.sql(expression, "kind") -1386 kind = f"{kind} " if kind else "" -1387 this = self.sql(expression, "this") -1388 expressions = self.expressions(expression) -1389 collate = self.sql(expression, "collate") -1390 collate = f" COLLATE {collate}" if collate else "" -1391 global_ = "GLOBAL " if expression.args.get("global") else "" -1392 return f"{global_}{kind}{this}{expressions}{collate}" -1393 -1394 def set_sql(self, expression: exp.Set) -> str: -1395 expressions = ( -1396 f" {self.expressions(expression, flat=True)}" if expression.expressions else "" -1397 ) -1398 return f"SET{expressions}" +1376 def setitem_sql(self, expression: exp.SetItem) -> str: +1377 kind = self.sql(expression, "kind") +1378 kind = f"{kind} " if kind else "" +1379 
this = self.sql(expression, "this") +1380 expressions = self.expressions(expression) +1381 collate = self.sql(expression, "collate") +1382 collate = f" COLLATE {collate}" if collate else "" +1383 global_ = "GLOBAL " if expression.args.get("global") else "" +1384 return f"{global_}{kind}{this}{expressions}{collate}" +1385 +1386 def set_sql(self, expression: exp.Set) -> str: +1387 expressions = ( +1388 f" {self.expressions(expression, flat=True)}" if expression.expressions else "" +1389 ) +1390 return f"SET{expressions}" +1391 +1392 def pragma_sql(self, expression: exp.Pragma) -> str: +1393 return f"PRAGMA {self.sql(expression, 'this')}" +1394 +1395 def lock_sql(self, expression: exp.Lock) -> str: +1396 if not self.LOCKING_READS_SUPPORTED: +1397 self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported") +1398 return "" 1399 -1400 def pragma_sql(self, expression: exp.Pragma) -> str: -1401 return f"PRAGMA {self.sql(expression, 'this')}" -1402 -1403 def lock_sql(self, expression: exp.Lock) -> str: -1404 if not self.LOCKING_READS_SUPPORTED: -1405 self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported") -1406 return "" -1407 -1408 lock_type = "FOR UPDATE" if expression.args["update"] else "FOR SHARE" -1409 expressions = self.expressions(expression, flat=True) -1410 expressions = f" OF {expressions}" if expressions else "" -1411 wait = expression.args.get("wait") +1400 lock_type = "FOR UPDATE" if expression.args["update"] else "FOR SHARE" +1401 expressions = self.expressions(expression, flat=True) +1402 expressions = f" OF {expressions}" if expressions else "" +1403 wait = expression.args.get("wait") +1404 +1405 if wait is not None: +1406 if isinstance(wait, exp.Literal): +1407 wait = f" WAIT {self.sql(wait)}" +1408 else: +1409 wait = " NOWAIT" if wait else " SKIP LOCKED" +1410 +1411 return f"{lock_type}{expressions}{wait or ''}" 1412 -1413 if wait is not None: -1414 if isinstance(wait, exp.Literal): -1415 wait = f" WAIT {self.sql(wait)}" -1416 else: -1417 wait = " NOWAIT" if wait else " SKIP LOCKED" -1418 -1419 return f"{lock_type}{expressions}{wait or ''}" -1420 -1421 def literal_sql(self, expression: exp.Literal) -> str: -1422 text = expression.this or "" -1423 if expression.is_string: -1424 text = text.replace(self.quote_end, self._escaped_quote_end) -1425 if self.pretty: -1426 text = text.replace("\n", self.SENTINEL_LINE_BREAK) -1427 text = f"{self.quote_start}{text}{self.quote_end}" -1428 return text -1429 -1430 def loaddata_sql(self, expression: exp.LoadData) -> str: -1431 local = " LOCAL" if expression.args.get("local") else "" -1432 inpath = f" INPATH {self.sql(expression, 'inpath')}" -1433 overwrite = " OVERWRITE" if expression.args.get("overwrite") else "" -1434 this = f" INTO TABLE {self.sql(expression, 'this')}" -1435 partition = self.sql(expression, "partition") -1436 partition = f" {partition}" if partition else "" -1437 input_format = self.sql(expression, "input_format") -1438 input_format = f" INPUTFORMAT {input_format}" if input_format else "" -1439 serde = self.sql(expression, "serde") -1440 serde = f" SERDE {serde}" if serde else "" -1441 return f"LOAD DATA{local}{inpath}{overwrite}{this}{partition}{input_format}{serde}" -1442 -1443 def null_sql(self, *_) -> str: -1444 return "NULL" +1413 def literal_sql(self, expression: exp.Literal) -> str: +1414 text = expression.this or "" +1415 if expression.is_string: +1416 text = text.replace(self.QUOTE_END, self._escaped_quote_end) +1417 if self.pretty: +1418 text = text.replace("\n", 
self.SENTINEL_LINE_BREAK) +1419 text = f"{self.QUOTE_START}{text}{self.QUOTE_END}" +1420 return text +1421 +1422 def loaddata_sql(self, expression: exp.LoadData) -> str: +1423 local = " LOCAL" if expression.args.get("local") else "" +1424 inpath = f" INPATH {self.sql(expression, 'inpath')}" +1425 overwrite = " OVERWRITE" if expression.args.get("overwrite") else "" +1426 this = f" INTO TABLE {self.sql(expression, 'this')}" +1427 partition = self.sql(expression, "partition") +1428 partition = f" {partition}" if partition else "" +1429 input_format = self.sql(expression, "input_format") +1430 input_format = f" INPUTFORMAT {input_format}" if input_format else "" +1431 serde = self.sql(expression, "serde") +1432 serde = f" SERDE {serde}" if serde else "" +1433 return f"LOAD DATA{local}{inpath}{overwrite}{this}{partition}{input_format}{serde}" +1434 +1435 def null_sql(self, *_) -> str: +1436 return "NULL" +1437 +1438 def boolean_sql(self, expression: exp.Boolean) -> str: +1439 return "TRUE" if expression.this else "FALSE" +1440 +1441 def order_sql(self, expression: exp.Order, flat: bool = False) -> str: +1442 this = self.sql(expression, "this") +1443 this = f"{this} " if this else this +1444 return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat) # type: ignore 1445 -1446 def boolean_sql(self, expression: exp.Boolean) -> str: -1447 return "TRUE" if expression.this else "FALSE" +1446 def cluster_sql(self, expression: exp.Cluster) -> str: +1447 return self.op_expressions("CLUSTER BY", expression) 1448 -1449 def order_sql(self, expression: exp.Order, flat: bool = False) -> str: -1450 this = self.sql(expression, "this") -1451 this = f"{this} " if this else this -1452 return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat) # type: ignore -1453 -1454 def cluster_sql(self, expression: exp.Cluster) -> str: -1455 return self.op_expressions("CLUSTER BY", expression) -1456 -1457 def distribute_sql(self, expression: exp.Distribute) -> str: -1458 return self.op_expressions("DISTRIBUTE BY", expression) -1459 -1460 def sort_sql(self, expression: exp.Sort) -> str: -1461 return self.op_expressions("SORT BY", expression) -1462 -1463 def ordered_sql(self, expression: exp.Ordered) -> str: -1464 desc = expression.args.get("desc") -1465 asc = not desc -1466 -1467 nulls_first = expression.args.get("nulls_first") -1468 nulls_last = not nulls_first -1469 nulls_are_large = self.null_ordering == "nulls_are_large" -1470 nulls_are_small = self.null_ordering == "nulls_are_small" -1471 nulls_are_last = self.null_ordering == "nulls_are_last" -1472 -1473 sort_order = " DESC" if desc else "" -1474 nulls_sort_change = "" -1475 if nulls_first and ( -1476 (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last -1477 ): -1478 nulls_sort_change = " NULLS FIRST" -1479 elif ( -1480 nulls_last -1481 and ((asc and nulls_are_small) or (desc and nulls_are_large)) -1482 and not nulls_are_last -1483 ): -1484 nulls_sort_change = " NULLS LAST" +1449 def distribute_sql(self, expression: exp.Distribute) -> str: +1450 return self.op_expressions("DISTRIBUTE BY", expression) +1451 +1452 def sort_sql(self, expression: exp.Sort) -> str: +1453 return self.op_expressions("SORT BY", expression) +1454 +1455 def ordered_sql(self, expression: exp.Ordered) -> str: +1456 desc = expression.args.get("desc") +1457 asc = not desc +1458 +1459 nulls_first = expression.args.get("nulls_first") +1460 nulls_last = not nulls_first +1461 nulls_are_large = self.NULL_ORDERING == "nulls_are_large" +1462 
nulls_are_small = self.NULL_ORDERING == "nulls_are_small" +1463 nulls_are_last = self.NULL_ORDERING == "nulls_are_last" +1464 +1465 sort_order = " DESC" if desc else "" +1466 nulls_sort_change = "" +1467 if nulls_first and ( +1468 (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last +1469 ): +1470 nulls_sort_change = " NULLS FIRST" +1471 elif ( +1472 nulls_last +1473 and ((asc and nulls_are_small) or (desc and nulls_are_large)) +1474 and not nulls_are_last +1475 ): +1476 nulls_sort_change = " NULLS LAST" +1477 +1478 if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED: +1479 self.unsupported( +1480 "Sorting in an ORDER BY on NULLS FIRST/NULLS LAST is not supported by this dialect" +1481 ) +1482 nulls_sort_change = "" +1483 +1484 return f"{self.sql(expression, 'this')}{sort_order}{nulls_sort_change}" 1485 -1486 if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED: -1487 self.unsupported( -1488 "Sorting in an ORDER BY on NULLS FIRST/NULLS LAST is not supported by this dialect" -1489 ) -1490 nulls_sort_change = "" -1491 -1492 return f"{self.sql(expression, 'this')}{sort_order}{nulls_sort_change}" -1493 -1494 def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str: -1495 partition = self.partition_by_sql(expression) -1496 order = self.sql(expression, "order") -1497 measures = self.expressions(expression, key="measures") -1498 measures = self.seg(f"MEASURES{self.seg(measures)}") if measures else "" -1499 rows = self.sql(expression, "rows") -1500 rows = self.seg(rows) if rows else "" -1501 after = self.sql(expression, "after") -1502 after = self.seg(after) if after else "" -1503 pattern = self.sql(expression, "pattern") -1504 pattern = self.seg(f"PATTERN ({pattern})") if pattern else "" -1505 definition_sqls = [ -1506 f"{self.sql(definition, 'alias')} AS {self.sql(definition, 'this')}" -1507 for definition in expression.args.get("define", []) -1508 ] -1509 definitions = self.expressions(sqls=definition_sqls) -1510 define = self.seg(f"DEFINE{self.seg(definitions)}") if definitions else "" -1511 body = "".join( -1512 ( -1513 partition, -1514 order, -1515 measures, -1516 rows, -1517 after, -1518 pattern, -1519 define, -1520 ) -1521 ) -1522 alias = self.sql(expression, "alias") -1523 alias = f" {alias}" if alias else "" -1524 return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}{alias}" +1486 def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str: +1487 partition = self.partition_by_sql(expression) +1488 order = self.sql(expression, "order") +1489 measures = self.expressions(expression, key="measures") +1490 measures = self.seg(f"MEASURES{self.seg(measures)}") if measures else "" +1491 rows = self.sql(expression, "rows") +1492 rows = self.seg(rows) if rows else "" +1493 after = self.sql(expression, "after") +1494 after = self.seg(after) if after else "" +1495 pattern = self.sql(expression, "pattern") +1496 pattern = self.seg(f"PATTERN ({pattern})") if pattern else "" +1497 definition_sqls = [ +1498 f"{self.sql(definition, 'alias')} AS {self.sql(definition, 'this')}" +1499 for definition in expression.args.get("define", []) +1500 ] +1501 definitions = self.expressions(sqls=definition_sqls) +1502 define = self.seg(f"DEFINE{self.seg(definitions)}") if definitions else "" +1503 body = "".join( +1504 ( +1505 partition, +1506 order, +1507 measures, +1508 rows, +1509 after, +1510 pattern, +1511 define, +1512 ) +1513 ) +1514 alias = self.sql(expression, "alias") +1515 alias = f" {alias}" if alias else "" +1516 return f"{self.seg('MATCH_RECOGNIZE')} 
{self.wrap(body)}{alias}" +1517 +1518 def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str: +1519 limit: t.Optional[exp.Fetch | exp.Limit] = expression.args.get("limit") +1520 +1521 if self.LIMIT_FETCH == "LIMIT" and isinstance(limit, exp.Fetch): +1522 limit = exp.Limit(expression=limit.args.get("count")) +1523 elif self.LIMIT_FETCH == "FETCH" and isinstance(limit, exp.Limit): +1524 limit = exp.Fetch(direction="FIRST", count=limit.expression) 1525 -1526 def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str: -1527 limit = expression.args.get("limit") -1528 -1529 if self.LIMIT_FETCH == "LIMIT" and isinstance(limit, exp.Fetch): -1530 limit = exp.Limit(expression=limit.args.get("count")) -1531 elif self.LIMIT_FETCH == "FETCH" and isinstance(limit, exp.Limit): -1532 limit = exp.Fetch(direction="FIRST", count=limit.expression) -1533 -1534 fetch = isinstance(limit, exp.Fetch) -1535 -1536 return csv( -1537 *sqls, -1538 *[self.sql(join) for join in expression.args.get("joins") or []], -1539 self.sql(expression, "match"), -1540 *[self.sql(lateral) for lateral in expression.args.get("laterals") or []], -1541 self.sql(expression, "where"), -1542 self.sql(expression, "group"), -1543 self.sql(expression, "having"), -1544 *self.after_having_modifiers(expression), -1545 self.sql(expression, "order"), -1546 self.sql(expression, "offset") if fetch else self.sql(limit), -1547 self.sql(limit) if fetch else self.sql(expression, "offset"), -1548 *self.after_limit_modifiers(expression), -1549 sep="", -1550 ) -1551 -1552 def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]: -1553 return [ -1554 self.sql(expression, "qualify"), -1555 self.seg("WINDOW ") + self.expressions(expression, key="windows", flat=True) -1556 if expression.args.get("windows") -1557 else "", -1558 ] -1559 -1560 def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]: -1561 locks = self.expressions(expression, key="locks", sep=" ") -1562 locks = f" {locks}" if locks else "" -1563 return [locks, self.sql(expression, "sample")] -1564 -1565 def select_sql(self, expression: exp.Select) -> str: -1566 hint = self.sql(expression, "hint") -1567 distinct = self.sql(expression, "distinct") -1568 distinct = f" {distinct}" if distinct else "" -1569 kind = expression.args.get("kind") -1570 kind = f" AS {kind}" if kind else "" -1571 expressions = self.expressions(expression) -1572 expressions = f"{self.sep()}{expressions}" if expressions else expressions -1573 sql = self.query_modifiers( -1574 expression, -1575 f"SELECT{hint}{distinct}{kind}{expressions}", -1576 self.sql(expression, "into", comment=False), -1577 self.sql(expression, "from", comment=False), -1578 ) -1579 return self.prepend_ctes(expression, sql) -1580 -1581 def schema_sql(self, expression: exp.Schema) -> str: -1582 this = self.sql(expression, "this") -1583 this = f"{this} " if this else "" -1584 sql = f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}" -1585 return f"{this}{sql}" -1586 -1587 def star_sql(self, expression: exp.Star) -> str: -1588 except_ = self.expressions(expression, key="except", flat=True) -1589 except_ = f"{self.seg(self.STAR_MAPPING['except'])} ({except_})" if except_ else "" -1590 replace = self.expressions(expression, key="replace", flat=True) -1591 replace = f"{self.seg(self.STAR_MAPPING['replace'])} ({replace})" if replace else "" -1592 return f"*{except_}{replace}" -1593 -1594 def parameter_sql(self, expression: exp.Parameter) -> str: -1595 this = 
self.sql(expression, "this") -1596 this = f"{{{this}}}" if expression.args.get("wrapped") else f"{this}" -1597 return f"{self.PARAMETER_TOKEN}{this}" -1598 -1599 def sessionparameter_sql(self, expression: exp.SessionParameter) -> str: -1600 this = self.sql(expression, "this") -1601 kind = expression.text("kind") -1602 if kind: -1603 kind = f"{kind}." -1604 return f"@@{kind}{this}" -1605 -1606 def placeholder_sql(self, expression: exp.Placeholder) -> str: -1607 return f":{expression.name}" if expression.name else "?" -1608 -1609 def subquery_sql(self, expression: exp.Subquery, sep: str = " AS ") -> str: -1610 alias = self.sql(expression, "alias") -1611 alias = f"{sep}{alias}" if alias else "" -1612 -1613 pivots = self.expressions(expression, key="pivots", sep=" ", flat=True) -1614 pivots = f" {pivots}" if pivots else "" -1615 -1616 sql = self.query_modifiers(expression, self.wrap(expression), alias, pivots) -1617 return self.prepend_ctes(expression, sql) -1618 -1619 def qualify_sql(self, expression: exp.Qualify) -> str: -1620 this = self.indent(self.sql(expression, "this")) -1621 return f"{self.seg('QUALIFY')}{self.sep()}{this}" -1622 -1623 def union_sql(self, expression: exp.Union) -> str: -1624 return self.prepend_ctes( -1625 expression, -1626 self.set_operation(expression, self.union_op(expression)), -1627 ) -1628 -1629 def union_op(self, expression: exp.Union) -> str: -1630 kind = " DISTINCT" if self.EXPLICIT_UNION else "" -1631 kind = kind if expression.args.get("distinct") else " ALL" -1632 return f"UNION{kind}" -1633 -1634 def unnest_sql(self, expression: exp.Unnest) -> str: -1635 args = self.expressions(expression, flat=True) -1636 alias = expression.args.get("alias") -1637 if alias and self.unnest_column_only: -1638 columns = alias.columns -1639 alias = self.sql(columns[0]) if columns else "" -1640 else: -1641 alias = self.sql(expression, "alias") -1642 alias = f" AS {alias}" if alias else alias -1643 ordinality = " WITH ORDINALITY" if expression.args.get("ordinality") else "" -1644 offset = expression.args.get("offset") -1645 offset = f" WITH OFFSET AS {self.sql(offset)}" if offset else "" -1646 return f"UNNEST({args}){ordinality}{alias}{offset}" -1647 -1648 def where_sql(self, expression: exp.Where) -> str: -1649 this = self.indent(self.sql(expression, "this")) -1650 return f"{self.seg('WHERE')}{self.sep()}{this}" -1651 -1652 def window_sql(self, expression: exp.Window) -> str: -1653 this = self.sql(expression, "this") -1654 partition = self.partition_by_sql(expression) -1655 order = expression.args.get("order") -1656 order = self.order_sql(order, flat=True) if order else "" -1657 spec = self.sql(expression, "spec") -1658 alias = self.sql(expression, "alias") -1659 over = self.sql(expression, "over") or "OVER" -1660 -1661 this = f"{this} {'AS' if expression.arg_key == 'windows' else over}" +1526 fetch = isinstance(limit, exp.Fetch) +1527 +1528 return csv( +1529 *sqls, +1530 *[self.sql(join) for join in expression.args.get("joins") or []], +1531 self.sql(expression, "match"), +1532 *[self.sql(lateral) for lateral in expression.args.get("laterals") or []], +1533 self.sql(expression, "where"), +1534 self.sql(expression, "group"), +1535 self.sql(expression, "having"), +1536 *self.after_having_modifiers(expression), +1537 self.sql(expression, "order"), +1538 *self.offset_limit_modifiers(expression, fetch, limit), +1539 *self.after_limit_modifiers(expression), +1540 sep="", +1541 ) +1542 +1543 def offset_limit_modifiers( +1544 self, expression: exp.Expression, fetch: bool, limit: 
t.Optional[exp.Fetch | exp.Limit] +1545 ) -> t.List[str]: +1546 return [ +1547 self.sql(expression, "offset") if fetch else self.sql(limit), +1548 self.sql(limit) if fetch else self.sql(expression, "offset"), +1549 ] +1550 +1551 def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]: +1552 return [ +1553 self.sql(expression, "qualify"), +1554 self.seg("WINDOW ") + self.expressions(expression, key="windows", flat=True) +1555 if expression.args.get("windows") +1556 else "", +1557 ] +1558 +1559 def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]: +1560 locks = self.expressions(expression, key="locks", sep=" ") +1561 locks = f" {locks}" if locks else "" +1562 return [locks, self.sql(expression, "sample")] +1563 +1564 def select_sql(self, expression: exp.Select) -> str: +1565 hint = self.sql(expression, "hint") +1566 distinct = self.sql(expression, "distinct") +1567 distinct = f" {distinct}" if distinct else "" +1568 kind = expression.args.get("kind") +1569 kind = f" AS {kind}" if kind else "" +1570 expressions = self.expressions(expression) +1571 expressions = f"{self.sep()}{expressions}" if expressions else expressions +1572 sql = self.query_modifiers( +1573 expression, +1574 f"SELECT{hint}{distinct}{kind}{expressions}", +1575 self.sql(expression, "into", comment=False), +1576 self.sql(expression, "from", comment=False), +1577 ) +1578 return self.prepend_ctes(expression, sql) +1579 +1580 def schema_sql(self, expression: exp.Schema) -> str: +1581 this = self.sql(expression, "this") +1582 this = f"{this} " if this else "" +1583 sql = self.schema_columns_sql(expression) +1584 return f"{this}{sql}" +1585 +1586 def schema_columns_sql(self, expression: exp.Schema) -> str: +1587 return f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}" +1588 +1589 def star_sql(self, expression: exp.Star) -> str: +1590 except_ = self.expressions(expression, key="except", flat=True) +1591 except_ = f"{self.seg(self.STAR_MAPPING['except'])} ({except_})" if except_ else "" +1592 replace = self.expressions(expression, key="replace", flat=True) +1593 replace = f"{self.seg(self.STAR_MAPPING['replace'])} ({replace})" if replace else "" +1594 return f"*{except_}{replace}" +1595 +1596 def parameter_sql(self, expression: exp.Parameter) -> str: +1597 this = self.sql(expression, "this") +1598 this = f"{{{this}}}" if expression.args.get("wrapped") else f"{this}" +1599 return f"{self.PARAMETER_TOKEN}{this}" +1600 +1601 def sessionparameter_sql(self, expression: exp.SessionParameter) -> str: +1602 this = self.sql(expression, "this") +1603 kind = expression.text("kind") +1604 if kind: +1605 kind = f"{kind}." +1606 return f"@@{kind}{this}" +1607 +1608 def placeholder_sql(self, expression: exp.Placeholder) -> str: +1609 return f":{expression.name}" if expression.name else "?" 
+1610 +1611 def subquery_sql(self, expression: exp.Subquery, sep: str = " AS ") -> str: +1612 alias = self.sql(expression, "alias") +1613 alias = f"{sep}{alias}" if alias else "" +1614 +1615 pivots = self.expressions(expression, key="pivots", sep=" ", flat=True) +1616 pivots = f" {pivots}" if pivots else "" +1617 +1618 sql = self.query_modifiers(expression, self.wrap(expression), alias, pivots) +1619 return self.prepend_ctes(expression, sql) +1620 +1621 def qualify_sql(self, expression: exp.Qualify) -> str: +1622 this = self.indent(self.sql(expression, "this")) +1623 return f"{self.seg('QUALIFY')}{self.sep()}{this}" +1624 +1625 def union_sql(self, expression: exp.Union) -> str: +1626 return self.prepend_ctes( +1627 expression, +1628 self.set_operation(expression, self.union_op(expression)), +1629 ) +1630 +1631 def union_op(self, expression: exp.Union) -> str: +1632 kind = " DISTINCT" if self.EXPLICIT_UNION else "" +1633 kind = kind if expression.args.get("distinct") else " ALL" +1634 return f"UNION{kind}" +1635 +1636 def unnest_sql(self, expression: exp.Unnest) -> str: +1637 args = self.expressions(expression, flat=True) +1638 alias = expression.args.get("alias") +1639 if alias and self.UNNEST_COLUMN_ONLY: +1640 columns = alias.columns +1641 alias = self.sql(columns[0]) if columns else "" +1642 else: +1643 alias = self.sql(expression, "alias") +1644 alias = f" AS {alias}" if alias else alias +1645 ordinality = " WITH ORDINALITY" if expression.args.get("ordinality") else "" +1646 offset = expression.args.get("offset") +1647 offset = f" WITH OFFSET AS {self.sql(offset)}" if offset else "" +1648 return f"UNNEST({args}){ordinality}{alias}{offset}" +1649 +1650 def where_sql(self, expression: exp.Where) -> str: +1651 this = self.indent(self.sql(expression, "this")) +1652 return f"{self.seg('WHERE')}{self.sep()}{this}" +1653 +1654 def window_sql(self, expression: exp.Window) -> str: +1655 this = self.sql(expression, "this") +1656 partition = self.partition_by_sql(expression) +1657 order = expression.args.get("order") +1658 order = self.order_sql(order, flat=True) if order else "" +1659 spec = self.sql(expression, "spec") +1660 alias = self.sql(expression, "alias") +1661 over = self.sql(expression, "over") or "OVER" 1662 -1663 first = expression.args.get("first") -1664 if first is None: -1665 first = "" -1666 else: -1667 first = "FIRST" if first else "LAST" -1668 -1669 if not partition and not order and not spec and alias: -1670 return f"{this} {alias}" -1671 -1672 args = " ".join(arg for arg in (alias, first, partition, order, spec) if arg) -1673 return f"{this} ({args})" -1674 -1675 def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str: -1676 partition = self.expressions(expression, key="partition_by", flat=True) -1677 return f"PARTITION BY {partition}" if partition else "" -1678 -1679 def windowspec_sql(self, expression: exp.WindowSpec) -> str: -1680 kind = self.sql(expression, "kind") -1681 start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ") -1682 end = ( -1683 csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ") -1684 or "CURRENT ROW" -1685 ) -1686 return f"{kind} BETWEEN {start} AND {end}" -1687 -1688 def withingroup_sql(self, expression: exp.WithinGroup) -> str: -1689 this = self.sql(expression, "this") -1690 expression_sql = self.sql(expression, "expression")[1:] # order has a leading space -1691 return f"{this} WITHIN GROUP ({expression_sql})" -1692 -1693 def between_sql(self, expression: exp.Between) -> 
str: -1694 this = self.sql(expression, "this") -1695 low = self.sql(expression, "low") -1696 high = self.sql(expression, "high") -1697 return f"{this} BETWEEN {low} AND {high}" -1698 -1699 def bracket_sql(self, expression: exp.Bracket) -> str: -1700 expressions = apply_index_offset(expression.this, expression.expressions, self.index_offset) -1701 expressions_sql = ", ".join(self.sql(e) for e in expressions) -1702 -1703 return f"{self.sql(expression, 'this')}[{expressions_sql}]" +1663 this = f"{this} {'AS' if expression.arg_key == 'windows' else over}" +1664 +1665 first = expression.args.get("first") +1666 if first is None: +1667 first = "" +1668 else: +1669 first = "FIRST" if first else "LAST" +1670 +1671 if not partition and not order and not spec and alias: +1672 return f"{this} {alias}" +1673 +1674 args = " ".join(arg for arg in (alias, first, partition, order, spec) if arg) +1675 return f"{this} ({args})" +1676 +1677 def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str: +1678 partition = self.expressions(expression, key="partition_by", flat=True) +1679 return f"PARTITION BY {partition}" if partition else "" +1680 +1681 def windowspec_sql(self, expression: exp.WindowSpec) -> str: +1682 kind = self.sql(expression, "kind") +1683 start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ") +1684 end = ( +1685 csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ") +1686 or "CURRENT ROW" +1687 ) +1688 return f"{kind} BETWEEN {start} AND {end}" +1689 +1690 def withingroup_sql(self, expression: exp.WithinGroup) -> str: +1691 this = self.sql(expression, "this") +1692 expression_sql = self.sql(expression, "expression")[1:] # order has a leading space +1693 return f"{this} WITHIN GROUP ({expression_sql})" +1694 +1695 def between_sql(self, expression: exp.Between) -> str: +1696 this = self.sql(expression, "this") +1697 low = self.sql(expression, "low") +1698 high = self.sql(expression, "high") +1699 return f"{this} BETWEEN {low} AND {high}" +1700 +1701 def bracket_sql(self, expression: exp.Bracket) -> str: +1702 expressions = apply_index_offset(expression.this, expression.expressions, self.INDEX_OFFSET) +1703 expressions_sql = ", ".join(self.sql(e) for e in expressions) 1704 -1705 def all_sql(self, expression: exp.All) -> str: -1706 return f"ALL {self.wrap(expression)}" -1707 -1708 def any_sql(self, expression: exp.Any) -> str: -1709 this = self.sql(expression, "this") -1710 if isinstance(expression.this, exp.Subqueryable): -1711 this = self.wrap(this) -1712 return f"ANY {this}" -1713 -1714 def exists_sql(self, expression: exp.Exists) -> str: -1715 return f"EXISTS{self.wrap(expression)}" -1716 -1717 def case_sql(self, expression: exp.Case) -> str: -1718 this = self.sql(expression, "this") -1719 statements = [f"CASE {this}" if this else "CASE"] -1720 -1721 for e in expression.args["ifs"]: -1722 statements.append(f"WHEN {self.sql(e, 'this')}") -1723 statements.append(f"THEN {self.sql(e, 'true')}") -1724 -1725 default = self.sql(expression, "default") +1705 return f"{self.sql(expression, 'this')}[{expressions_sql}]" +1706 +1707 def all_sql(self, expression: exp.All) -> str: +1708 return f"ALL {self.wrap(expression)}" +1709 +1710 def any_sql(self, expression: exp.Any) -> str: +1711 this = self.sql(expression, "this") +1712 if isinstance(expression.this, exp.Subqueryable): +1713 this = self.wrap(this) +1714 return f"ANY {this}" +1715 +1716 def exists_sql(self, expression: exp.Exists) -> str: +1717 return 
f"EXISTS{self.wrap(expression)}" +1718 +1719 def case_sql(self, expression: exp.Case) -> str: +1720 this = self.sql(expression, "this") +1721 statements = [f"CASE {this}" if this else "CASE"] +1722 +1723 for e in expression.args["ifs"]: +1724 statements.append(f"WHEN {self.sql(e, 'this')}") +1725 statements.append(f"THEN {self.sql(e, 'true')}") 1726 -1727 if default: -1728 statements.append(f"ELSE {default}") -1729 -1730 statements.append("END") +1727 default = self.sql(expression, "default") +1728 +1729 if default: +1730 statements.append(f"ELSE {default}") 1731 -1732 if self.pretty and self.text_width(statements) > self._max_text_width: -1733 return self.indent("\n".join(statements), skip_first=True, skip_last=True) -1734 -1735 return " ".join(statements) +1732 statements.append("END") +1733 +1734 if self.pretty and self.text_width(statements) > self.max_text_width: +1735 return self.indent("\n".join(statements), skip_first=True, skip_last=True) 1736 -1737 def constraint_sql(self, expression: exp.Constraint) -> str: -1738 this = self.sql(expression, "this") -1739 expressions = self.expressions(expression, flat=True) -1740 return f"CONSTRAINT {this} {expressions}" -1741 -1742 def nextvaluefor_sql(self, expression: exp.NextValueFor) -> str: -1743 order = expression.args.get("order") -1744 order = f" OVER ({self.order_sql(order, flat=True)})" if order else "" -1745 return f"NEXT VALUE FOR {self.sql(expression, 'this')}{order}" -1746 -1747 def extract_sql(self, expression: exp.Extract) -> str: -1748 this = self.sql(expression, "this") -1749 expression_sql = self.sql(expression, "expression") -1750 return f"EXTRACT({this} FROM {expression_sql})" -1751 -1752 def trim_sql(self, expression: exp.Trim) -> str: -1753 trim_type = self.sql(expression, "position") -1754 -1755 if trim_type == "LEADING": -1756 return self.func("LTRIM", expression.this) -1757 elif trim_type == "TRAILING": -1758 return self.func("RTRIM", expression.this) -1759 else: -1760 return self.func("TRIM", expression.this, expression.expression) -1761 -1762 def concat_sql(self, expression: exp.Concat) -> str: -1763 if len(expression.expressions) == 1: -1764 return self.sql(expression.expressions[0]) -1765 return self.function_fallback_sql(expression) -1766 -1767 def check_sql(self, expression: exp.Check) -> str: -1768 this = self.sql(expression, key="this") -1769 return f"CHECK ({this})" -1770 -1771 def foreignkey_sql(self, expression: exp.ForeignKey) -> str: -1772 expressions = self.expressions(expression, flat=True) -1773 reference = self.sql(expression, "reference") -1774 reference = f" {reference}" if reference else "" -1775 delete = self.sql(expression, "delete") -1776 delete = f" ON DELETE {delete}" if delete else "" -1777 update = self.sql(expression, "update") -1778 update = f" ON UPDATE {update}" if update else "" -1779 return f"FOREIGN KEY ({expressions}){reference}{delete}{update}" -1780 -1781 def primarykey_sql(self, expression: exp.ForeignKey) -> str: -1782 expressions = self.expressions(expression, flat=True) -1783 options = self.expressions(expression, key="options", flat=True, sep=" ") -1784 options = f" {options}" if options else "" -1785 return f"PRIMARY KEY ({expressions}){options}" -1786 -1787 def if_sql(self, expression: exp.If) -> str: -1788 return self.case_sql( -1789 exp.Case(ifs=[expression.copy()], default=expression.args.get("false")) -1790 ) -1791 -1792 def matchagainst_sql(self, expression: exp.MatchAgainst) -> str: -1793 modifier = expression.args.get("modifier") -1794 modifier = f" {modifier}" if 
modifier else "" -1795 return f"{self.func('MATCH', *expression.expressions)} AGAINST({self.sql(expression, 'this')}{modifier})" -1796 -1797 def jsonkeyvalue_sql(self, expression: exp.JSONKeyValue) -> str: -1798 return f"{self.sql(expression, 'this')}: {self.sql(expression, 'expression')}" -1799 -1800 def jsonobject_sql(self, expression: exp.JSONObject) -> str: -1801 expressions = self.expressions(expression) +1737 return " ".join(statements) +1738 +1739 def constraint_sql(self, expression: exp.Constraint) -> str: +1740 this = self.sql(expression, "this") +1741 expressions = self.expressions(expression, flat=True) +1742 return f"CONSTRAINT {this} {expressions}" +1743 +1744 def nextvaluefor_sql(self, expression: exp.NextValueFor) -> str: +1745 order = expression.args.get("order") +1746 order = f" OVER ({self.order_sql(order, flat=True)})" if order else "" +1747 return f"NEXT VALUE FOR {self.sql(expression, 'this')}{order}" +1748 +1749 def extract_sql(self, expression: exp.Extract) -> str: +1750 this = self.sql(expression, "this") +1751 expression_sql = self.sql(expression, "expression") +1752 return f"EXTRACT({this} FROM {expression_sql})" +1753 +1754 def trim_sql(self, expression: exp.Trim) -> str: +1755 trim_type = self.sql(expression, "position") +1756 +1757 if trim_type == "LEADING": +1758 return self.func("LTRIM", expression.this) +1759 elif trim_type == "TRAILING": +1760 return self.func("RTRIM", expression.this) +1761 else: +1762 return self.func("TRIM", expression.this, expression.expression) +1763 +1764 def safeconcat_sql(self, expression: exp.SafeConcat) -> str: +1765 expressions = expression.expressions +1766 if self.STRICT_STRING_CONCAT: +1767 expressions = (exp.cast(e, "text") for e in expressions) +1768 return self.func("CONCAT", *expressions) +1769 +1770 def check_sql(self, expression: exp.Check) -> str: +1771 this = self.sql(expression, key="this") +1772 return f"CHECK ({this})" +1773 +1774 def foreignkey_sql(self, expression: exp.ForeignKey) -> str: +1775 expressions = self.expressions(expression, flat=True) +1776 reference = self.sql(expression, "reference") +1777 reference = f" {reference}" if reference else "" +1778 delete = self.sql(expression, "delete") +1779 delete = f" ON DELETE {delete}" if delete else "" +1780 update = self.sql(expression, "update") +1781 update = f" ON UPDATE {update}" if update else "" +1782 return f"FOREIGN KEY ({expressions}){reference}{delete}{update}" +1783 +1784 def primarykey_sql(self, expression: exp.ForeignKey) -> str: +1785 expressions = self.expressions(expression, flat=True) +1786 options = self.expressions(expression, key="options", flat=True, sep=" ") +1787 options = f" {options}" if options else "" +1788 return f"PRIMARY KEY ({expressions}){options}" +1789 +1790 def if_sql(self, expression: exp.If) -> str: +1791 return self.case_sql(exp.Case(ifs=[expression], default=expression.args.get("false"))) +1792 +1793 def matchagainst_sql(self, expression: exp.MatchAgainst) -> str: +1794 modifier = expression.args.get("modifier") +1795 modifier = f" {modifier}" if modifier else "" +1796 return f"{self.func('MATCH', *expression.expressions)} AGAINST({self.sql(expression, 'this')}{modifier})" +1797 +1798 def jsonkeyvalue_sql(self, expression: exp.JSONKeyValue) -> str: +1799 return f"{self.sql(expression, 'this')}: {self.sql(expression, 'expression')}" +1800 +1801 def jsonobject_sql(self, expression: exp.JSONObject) -> str: 1802 null_handling = expression.args.get("null_handling") 1803 null_handling = f" {null_handling}" if null_handling else 
"" 1804 unique_keys = expression.args.get("unique_keys") @@ -2577,543 +2592,567 @@ 1811 format_json = " FORMAT JSON" if expression.args.get("format_json") else "" 1812 encoding = self.sql(expression, "encoding") 1813 encoding = f" ENCODING {encoding}" if encoding else "" -1814 return f"JSON_OBJECT({expressions}{null_handling}{unique_keys}{return_type}{format_json}{encoding})" -1815 -1816 def openjsoncolumndef_sql(self, expression: exp.OpenJSONColumnDef) -> str: -1817 this = self.sql(expression, "this") -1818 kind = self.sql(expression, "kind") -1819 path = self.sql(expression, "path") -1820 path = f" {path}" if path else "" -1821 as_json = " AS JSON" if expression.args.get("as_json") else "" -1822 return f"{this} {kind}{path}{as_json}" -1823 -1824 def openjson_sql(self, expression: exp.OpenJSON) -> str: -1825 this = self.sql(expression, "this") -1826 path = self.sql(expression, "path") -1827 path = f", {path}" if path else "" -1828 expressions = self.expressions(expression) -1829 with_ = ( -1830 f" WITH ({self.seg(self.indent(expressions), sep='')}{self.seg(')', sep='')}" -1831 if expressions -1832 else "" -1833 ) -1834 return f"OPENJSON({this}{path}){with_}" -1835 -1836 def in_sql(self, expression: exp.In) -> str: -1837 query = expression.args.get("query") -1838 unnest = expression.args.get("unnest") -1839 field = expression.args.get("field") -1840 is_global = " GLOBAL" if expression.args.get("is_global") else "" -1841 -1842 if query: -1843 in_sql = self.wrap(query) -1844 elif unnest: -1845 in_sql = self.in_unnest_op(unnest) -1846 elif field: -1847 in_sql = self.sql(field) -1848 else: -1849 in_sql = f"({self.expressions(expression, flat=True)})" -1850 -1851 return f"{self.sql(expression, 'this')}{is_global} IN {in_sql}" -1852 -1853 def in_unnest_op(self, unnest: exp.Unnest) -> str: -1854 return f"(SELECT {self.sql(unnest)})" -1855 -1856 def interval_sql(self, expression: exp.Interval) -> str: -1857 unit = self.sql(expression, "unit") -1858 if not self.INTERVAL_ALLOWS_PLURAL_FORM: -1859 unit = self.TIME_PART_SINGULARS.get(unit.lower(), unit) -1860 unit = f" {unit}" if unit else "" -1861 -1862 if self.SINGLE_STRING_INTERVAL: -1863 this = expression.this.name if expression.this else "" -1864 return f"INTERVAL '{this}{unit}'" if this else f"INTERVAL{unit}" +1814 return self.func( +1815 "JSON_OBJECT", +1816 *expression.expressions, +1817 suffix=f"{null_handling}{unique_keys}{return_type}{format_json}{encoding})", +1818 ) +1819 +1820 def openjsoncolumndef_sql(self, expression: exp.OpenJSONColumnDef) -> str: +1821 this = self.sql(expression, "this") +1822 kind = self.sql(expression, "kind") +1823 path = self.sql(expression, "path") +1824 path = f" {path}" if path else "" +1825 as_json = " AS JSON" if expression.args.get("as_json") else "" +1826 return f"{this} {kind}{path}{as_json}" +1827 +1828 def openjson_sql(self, expression: exp.OpenJSON) -> str: +1829 this = self.sql(expression, "this") +1830 path = self.sql(expression, "path") +1831 path = f", {path}" if path else "" +1832 expressions = self.expressions(expression) +1833 with_ = ( +1834 f" WITH ({self.seg(self.indent(expressions), sep='')}{self.seg(')', sep='')}" +1835 if expressions +1836 else "" +1837 ) +1838 return f"OPENJSON({this}{path}){with_}" +1839 +1840 def in_sql(self, expression: exp.In) -> str: +1841 query = expression.args.get("query") +1842 unnest = expression.args.get("unnest") +1843 field = expression.args.get("field") +1844 is_global = " GLOBAL" if expression.args.get("is_global") else "" +1845 +1846 if query: +1847 in_sql 
= self.wrap(query) +1848 elif unnest: +1849 in_sql = self.in_unnest_op(unnest) +1850 elif field: +1851 in_sql = self.sql(field) +1852 else: +1853 in_sql = f"({self.expressions(expression, flat=True)})" +1854 +1855 return f"{self.sql(expression, 'this')}{is_global} IN {in_sql}" +1856 +1857 def in_unnest_op(self, unnest: exp.Unnest) -> str: +1858 return f"(SELECT {self.sql(unnest)})" +1859 +1860 def interval_sql(self, expression: exp.Interval) -> str: +1861 unit = self.sql(expression, "unit") +1862 if not self.INTERVAL_ALLOWS_PLURAL_FORM: +1863 unit = self.TIME_PART_SINGULARS.get(unit.lower(), unit) +1864 unit = f" {unit}" if unit else "" 1865 -1866 this = self.sql(expression, "this") -1867 if this: -1868 unwrapped = isinstance(expression.this, self.UNWRAPPED_INTERVAL_VALUES) -1869 this = f" {this}" if unwrapped else f" ({this})" -1870 -1871 return f"INTERVAL{this}{unit}" -1872 -1873 def return_sql(self, expression: exp.Return) -> str: -1874 return f"RETURN {self.sql(expression, 'this')}" -1875 -1876 def reference_sql(self, expression: exp.Reference) -> str: -1877 this = self.sql(expression, "this") -1878 expressions = self.expressions(expression, flat=True) -1879 expressions = f"({expressions})" if expressions else "" -1880 options = self.expressions(expression, key="options", flat=True, sep=" ") -1881 options = f" {options}" if options else "" -1882 return f"REFERENCES {this}{expressions}{options}" -1883 -1884 def anonymous_sql(self, expression: exp.Anonymous) -> str: -1885 return self.func(expression.name, *expression.expressions) -1886 -1887 def paren_sql(self, expression: exp.Paren) -> str: -1888 if isinstance(expression.unnest(), exp.Select): -1889 sql = self.wrap(expression) -1890 else: -1891 sql = self.seg(self.indent(self.sql(expression, "this")), sep="") -1892 sql = f"({sql}{self.seg(')', sep='')}" -1893 -1894 return self.prepend_ctes(expression, sql) -1895 -1896 def neg_sql(self, expression: exp.Neg) -> str: -1897 # This makes sure we don't convert "- - 5" to "--5", which is a comment -1898 this_sql = self.sql(expression, "this") -1899 sep = " " if this_sql[0] == "-" else "" -1900 return f"-{sep}{this_sql}" -1901 -1902 def not_sql(self, expression: exp.Not) -> str: -1903 return f"NOT {self.sql(expression, 'this')}" -1904 -1905 def alias_sql(self, expression: exp.Alias) -> str: -1906 alias = self.sql(expression, "alias") -1907 alias = f" AS {alias}" if alias else "" -1908 return f"{self.sql(expression, 'this')}{alias}" -1909 -1910 def aliases_sql(self, expression: exp.Aliases) -> str: -1911 return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})" -1912 -1913 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: -1914 this = self.sql(expression, "this") -1915 zone = self.sql(expression, "zone") -1916 return f"{this} AT TIME ZONE {zone}" -1917 -1918 def add_sql(self, expression: exp.Add) -> str: -1919 return self.binary(expression, "+") -1920 -1921 def and_sql(self, expression: exp.And) -> str: -1922 return self.connector_sql(expression, "AND") -1923 -1924 def connector_sql(self, expression: exp.Connector, op: str) -> str: -1925 if not self.pretty: -1926 return self.binary(expression, op) +1866 if self.SINGLE_STRING_INTERVAL: +1867 this = expression.this.name if expression.this else "" +1868 return f"INTERVAL '{this}{unit}'" if this else f"INTERVAL{unit}" +1869 +1870 this = self.sql(expression, "this") +1871 if this: +1872 unwrapped = isinstance(expression.this, self.UNWRAPPED_INTERVAL_VALUES) +1873 this = f" {this}" if unwrapped else f" 
({this})" +1874 +1875 return f"INTERVAL{this}{unit}" +1876 +1877 def return_sql(self, expression: exp.Return) -> str: +1878 return f"RETURN {self.sql(expression, 'this')}" +1879 +1880 def reference_sql(self, expression: exp.Reference) -> str: +1881 this = self.sql(expression, "this") +1882 expressions = self.expressions(expression, flat=True) +1883 expressions = f"({expressions})" if expressions else "" +1884 options = self.expressions(expression, key="options", flat=True, sep=" ") +1885 options = f" {options}" if options else "" +1886 return f"REFERENCES {this}{expressions}{options}" +1887 +1888 def anonymous_sql(self, expression: exp.Anonymous) -> str: +1889 return self.func(expression.name, *expression.expressions) +1890 +1891 def paren_sql(self, expression: exp.Paren) -> str: +1892 if isinstance(expression.unnest(), exp.Select): +1893 sql = self.wrap(expression) +1894 else: +1895 sql = self.seg(self.indent(self.sql(expression, "this")), sep="") +1896 sql = f"({sql}{self.seg(')', sep='')}" +1897 +1898 return self.prepend_ctes(expression, sql) +1899 +1900 def neg_sql(self, expression: exp.Neg) -> str: +1901 # This makes sure we don't convert "- - 5" to "--5", which is a comment +1902 this_sql = self.sql(expression, "this") +1903 sep = " " if this_sql[0] == "-" else "" +1904 return f"-{sep}{this_sql}" +1905 +1906 def not_sql(self, expression: exp.Not) -> str: +1907 return f"NOT {self.sql(expression, 'this')}" +1908 +1909 def alias_sql(self, expression: exp.Alias) -> str: +1910 alias = self.sql(expression, "alias") +1911 alias = f" AS {alias}" if alias else "" +1912 return f"{self.sql(expression, 'this')}{alias}" +1913 +1914 def aliases_sql(self, expression: exp.Aliases) -> str: +1915 return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})" +1916 +1917 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: +1918 this = self.sql(expression, "this") +1919 zone = self.sql(expression, "zone") +1920 return f"{this} AT TIME ZONE {zone}" +1921 +1922 def add_sql(self, expression: exp.Add) -> str: +1923 return self.binary(expression, "+") +1924 +1925 def and_sql(self, expression: exp.And) -> str: +1926 return self.connector_sql(expression, "AND") 1927 -1928 sqls = tuple( -1929 self.maybe_comment(self.sql(e), e, e.parent.comments or []) if i != 1 else self.sql(e) -1930 for i, e in enumerate(expression.flatten(unnest=False)) -1931 ) -1932 -1933 sep = "\n" if self.text_width(sqls) > self._max_text_width else " " -1934 return f"{sep}{op} ".join(sqls) -1935 -1936 def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str: -1937 return self.binary(expression, "&") -1938 -1939 def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str: -1940 return self.binary(expression, "<<") -1941 -1942 def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str: -1943 return f"~{self.sql(expression, 'this')}" -1944 -1945 def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str: -1946 return self.binary(expression, "|") -1947 -1948 def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str: -1949 return self.binary(expression, ">>") -1950 -1951 def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str: -1952 return self.binary(expression, "^") -1953 -1954 def cast_sql(self, expression: exp.Cast) -> str: -1955 return f"CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})" -1956 -1957 def currentdate_sql(self, expression: exp.CurrentDate) -> str: -1958 zone = self.sql(expression, "this") -1959 return f"CURRENT_DATE({zone})" if zone else 
"CURRENT_DATE" +1928 def connector_sql(self, expression: exp.Connector, op: str) -> str: +1929 if not self.pretty: +1930 return self.binary(expression, op) +1931 +1932 sqls = tuple( +1933 self.maybe_comment(self.sql(e), e, e.parent.comments or []) if i != 1 else self.sql(e) +1934 for i, e in enumerate(expression.flatten(unnest=False)) +1935 ) +1936 +1937 sep = "\n" if self.text_width(sqls) > self.max_text_width else " " +1938 return f"{sep}{op} ".join(sqls) +1939 +1940 def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str: +1941 return self.binary(expression, "&") +1942 +1943 def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str: +1944 return self.binary(expression, "<<") +1945 +1946 def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str: +1947 return f"~{self.sql(expression, 'this')}" +1948 +1949 def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str: +1950 return self.binary(expression, "|") +1951 +1952 def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str: +1953 return self.binary(expression, ">>") +1954 +1955 def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str: +1956 return self.binary(expression, "^") +1957 +1958 def cast_sql(self, expression: exp.Cast) -> str: +1959 return f"CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})" 1960 -1961 def collate_sql(self, expression: exp.Collate) -> str: -1962 return self.binary(expression, "COLLATE") -1963 -1964 def command_sql(self, expression: exp.Command) -> str: -1965 return f"{self.sql(expression, 'this').upper()} {expression.text('expression').strip()}" -1966 -1967 def comment_sql(self, expression: exp.Comment) -> str: -1968 this = self.sql(expression, "this") -1969 kind = expression.args["kind"] -1970 exists_sql = " IF EXISTS " if expression.args.get("exists") else " " -1971 expression_sql = self.sql(expression, "expression") -1972 return f"COMMENT{exists_sql}ON {kind} {this} IS {expression_sql}" -1973 -1974 def mergetreettlaction_sql(self, expression: exp.MergeTreeTTLAction) -> str: -1975 this = self.sql(expression, "this") -1976 delete = " DELETE" if expression.args.get("delete") else "" -1977 recompress = self.sql(expression, "recompress") -1978 recompress = f" RECOMPRESS {recompress}" if recompress else "" -1979 to_disk = self.sql(expression, "to_disk") -1980 to_disk = f" TO DISK {to_disk}" if to_disk else "" -1981 to_volume = self.sql(expression, "to_volume") -1982 to_volume = f" TO VOLUME {to_volume}" if to_volume else "" -1983 return f"{this}{delete}{recompress}{to_disk}{to_volume}" -1984 -1985 def mergetreettl_sql(self, expression: exp.MergeTreeTTL) -> str: -1986 where = self.sql(expression, "where") -1987 group = self.sql(expression, "group") -1988 aggregates = self.expressions(expression, key="aggregates") -1989 aggregates = self.seg("SET") + self.seg(aggregates) if aggregates else "" -1990 -1991 if not (where or group or aggregates) and len(expression.expressions) == 1: -1992 return f"TTL {self.expressions(expression, flat=True)}" -1993 -1994 return f"TTL{self.seg(self.expressions(expression))}{where}{group}{aggregates}" -1995 -1996 def transaction_sql(self, expression: exp.Transaction) -> str: -1997 return "BEGIN" -1998 -1999 def commit_sql(self, expression: exp.Commit) -> str: -2000 chain = expression.args.get("chain") -2001 if chain is not None: -2002 chain = " AND CHAIN" if chain else " AND NO CHAIN" -2003 -2004 return f"COMMIT{chain or ''}" -2005 -2006 def rollback_sql(self, expression: exp.Rollback) -> str: -2007 savepoint = 
expression.args.get("savepoint") -2008 savepoint = f" TO {savepoint}" if savepoint else "" -2009 return f"ROLLBACK{savepoint}" -2010 -2011 def altercolumn_sql(self, expression: exp.AlterColumn) -> str: -2012 this = self.sql(expression, "this") -2013 -2014 dtype = self.sql(expression, "dtype") -2015 if dtype: -2016 collate = self.sql(expression, "collate") -2017 collate = f" COLLATE {collate}" if collate else "" -2018 using = self.sql(expression, "using") -2019 using = f" USING {using}" if using else "" -2020 return f"ALTER COLUMN {this} TYPE {dtype}{collate}{using}" -2021 -2022 default = self.sql(expression, "default") -2023 if default: -2024 return f"ALTER COLUMN {this} SET DEFAULT {default}" +1961 def currentdate_sql(self, expression: exp.CurrentDate) -> str: +1962 zone = self.sql(expression, "this") +1963 return f"CURRENT_DATE({zone})" if zone else "CURRENT_DATE" +1964 +1965 def collate_sql(self, expression: exp.Collate) -> str: +1966 return self.binary(expression, "COLLATE") +1967 +1968 def command_sql(self, expression: exp.Command) -> str: +1969 return f"{self.sql(expression, 'this').upper()} {expression.text('expression').strip()}" +1970 +1971 def comment_sql(self, expression: exp.Comment) -> str: +1972 this = self.sql(expression, "this") +1973 kind = expression.args["kind"] +1974 exists_sql = " IF EXISTS " if expression.args.get("exists") else " " +1975 expression_sql = self.sql(expression, "expression") +1976 return f"COMMENT{exists_sql}ON {kind} {this} IS {expression_sql}" +1977 +1978 def mergetreettlaction_sql(self, expression: exp.MergeTreeTTLAction) -> str: +1979 this = self.sql(expression, "this") +1980 delete = " DELETE" if expression.args.get("delete") else "" +1981 recompress = self.sql(expression, "recompress") +1982 recompress = f" RECOMPRESS {recompress}" if recompress else "" +1983 to_disk = self.sql(expression, "to_disk") +1984 to_disk = f" TO DISK {to_disk}" if to_disk else "" +1985 to_volume = self.sql(expression, "to_volume") +1986 to_volume = f" TO VOLUME {to_volume}" if to_volume else "" +1987 return f"{this}{delete}{recompress}{to_disk}{to_volume}" +1988 +1989 def mergetreettl_sql(self, expression: exp.MergeTreeTTL) -> str: +1990 where = self.sql(expression, "where") +1991 group = self.sql(expression, "group") +1992 aggregates = self.expressions(expression, key="aggregates") +1993 aggregates = self.seg("SET") + self.seg(aggregates) if aggregates else "" +1994 +1995 if not (where or group or aggregates) and len(expression.expressions) == 1: +1996 return f"TTL {self.expressions(expression, flat=True)}" +1997 +1998 return f"TTL{self.seg(self.expressions(expression))}{where}{group}{aggregates}" +1999 +2000 def transaction_sql(self, expression: exp.Transaction) -> str: +2001 return "BEGIN" +2002 +2003 def commit_sql(self, expression: exp.Commit) -> str: +2004 chain = expression.args.get("chain") +2005 if chain is not None: +2006 chain = " AND CHAIN" if chain else " AND NO CHAIN" +2007 +2008 return f"COMMIT{chain or ''}" +2009 +2010 def rollback_sql(self, expression: exp.Rollback) -> str: +2011 savepoint = expression.args.get("savepoint") +2012 savepoint = f" TO {savepoint}" if savepoint else "" +2013 return f"ROLLBACK{savepoint}" +2014 +2015 def altercolumn_sql(self, expression: exp.AlterColumn) -> str: +2016 this = self.sql(expression, "this") +2017 +2018 dtype = self.sql(expression, "dtype") +2019 if dtype: +2020 collate = self.sql(expression, "collate") +2021 collate = f" COLLATE {collate}" if collate else "" +2022 using = self.sql(expression, "using") +2023 using 
= f" USING {using}" if using else "" +2024 return f"ALTER COLUMN {this} TYPE {dtype}{collate}{using}" 2025 -2026 if not expression.args.get("drop"): -2027 self.unsupported("Unsupported ALTER COLUMN syntax") -2028 -2029 return f"ALTER COLUMN {this} DROP DEFAULT" -2030 -2031 def renametable_sql(self, expression: exp.RenameTable) -> str: -2032 if not self.RENAME_TABLE_WITH_DB: -2033 # Remove db from tables -2034 expression = expression.transform( -2035 lambda n: exp.table_(n.this) if isinstance(n, exp.Table) else n -2036 ) -2037 this = self.sql(expression, "this") -2038 return f"RENAME TO {this}" -2039 -2040 def altertable_sql(self, expression: exp.AlterTable) -> str: -2041 actions = expression.args["actions"] -2042 -2043 if isinstance(actions[0], exp.ColumnDef): -2044 actions = self.expressions(expression, key="actions", prefix="ADD COLUMN ") -2045 elif isinstance(actions[0], exp.Schema): -2046 actions = self.expressions(expression, key="actions", prefix="ADD COLUMNS ") -2047 elif isinstance(actions[0], exp.Delete): -2048 actions = self.expressions(expression, key="actions", flat=True) -2049 else: -2050 actions = self.expressions(expression, key="actions") -2051 -2052 exists = " IF EXISTS" if expression.args.get("exists") else "" -2053 return f"ALTER TABLE{exists} {self.sql(expression, 'this')} {actions}" -2054 -2055 def droppartition_sql(self, expression: exp.DropPartition) -> str: -2056 expressions = self.expressions(expression) -2057 exists = " IF EXISTS " if expression.args.get("exists") else " " -2058 return f"DROP{exists}{expressions}" -2059 -2060 def addconstraint_sql(self, expression: exp.AddConstraint) -> str: -2061 this = self.sql(expression, "this") -2062 expression_ = self.sql(expression, "expression") -2063 add_constraint = f"ADD CONSTRAINT {this}" if this else "ADD" -2064 -2065 enforced = expression.args.get("enforced") -2066 if enforced is not None: -2067 return f"{add_constraint} CHECK ({expression_}){' ENFORCED' if enforced else ''}" +2026 default = self.sql(expression, "default") +2027 if default: +2028 return f"ALTER COLUMN {this} SET DEFAULT {default}" +2029 +2030 if not expression.args.get("drop"): +2031 self.unsupported("Unsupported ALTER COLUMN syntax") +2032 +2033 return f"ALTER COLUMN {this} DROP DEFAULT" +2034 +2035 def renametable_sql(self, expression: exp.RenameTable) -> str: +2036 if not self.RENAME_TABLE_WITH_DB: +2037 # Remove db from tables +2038 expression = expression.transform( +2039 lambda n: exp.table_(n.this) if isinstance(n, exp.Table) else n +2040 ) +2041 this = self.sql(expression, "this") +2042 return f"RENAME TO {this}" +2043 +2044 def altertable_sql(self, expression: exp.AlterTable) -> str: +2045 actions = expression.args["actions"] +2046 +2047 if isinstance(actions[0], exp.ColumnDef): +2048 actions = self.expressions(expression, key="actions", prefix="ADD COLUMN ") +2049 elif isinstance(actions[0], exp.Schema): +2050 actions = self.expressions(expression, key="actions", prefix="ADD COLUMNS ") +2051 elif isinstance(actions[0], exp.Delete): +2052 actions = self.expressions(expression, key="actions", flat=True) +2053 else: +2054 actions = self.expressions(expression, key="actions") +2055 +2056 exists = " IF EXISTS" if expression.args.get("exists") else "" +2057 return f"ALTER TABLE{exists} {self.sql(expression, 'this')} {actions}" +2058 +2059 def droppartition_sql(self, expression: exp.DropPartition) -> str: +2060 expressions = self.expressions(expression) +2061 exists = " IF EXISTS " if expression.args.get("exists") else " " +2062 return 
f"DROP{exists}{expressions}" +2063 +2064 def addconstraint_sql(self, expression: exp.AddConstraint) -> str: +2065 this = self.sql(expression, "this") +2066 expression_ = self.sql(expression, "expression") +2067 add_constraint = f"ADD CONSTRAINT {this}" if this else "ADD" 2068 -2069 return f"{add_constraint} {expression_}" -2070 -2071 def distinct_sql(self, expression: exp.Distinct) -> str: -2072 this = self.expressions(expression, flat=True) -2073 this = f" {this}" if this else "" +2069 enforced = expression.args.get("enforced") +2070 if enforced is not None: +2071 return f"{add_constraint} CHECK ({expression_}){' ENFORCED' if enforced else ''}" +2072 +2073 return f"{add_constraint} {expression_}" 2074 -2075 on = self.sql(expression, "on") -2076 on = f" ON {on}" if on else "" -2077 return f"DISTINCT{this}{on}" +2075 def distinct_sql(self, expression: exp.Distinct) -> str: +2076 this = self.expressions(expression, flat=True) +2077 this = f" {this}" if this else "" 2078 -2079 def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str: -2080 return f"{self.sql(expression, 'this')} IGNORE NULLS" -2081 -2082 def respectnulls_sql(self, expression: exp.RespectNulls) -> str: -2083 return f"{self.sql(expression, 'this')} RESPECT NULLS" -2084 -2085 def intdiv_sql(self, expression: exp.IntDiv) -> str: -2086 return self.sql( -2087 exp.Cast( -2088 this=exp.Div(this=expression.this, expression=expression.expression), -2089 to=exp.DataType(this=exp.DataType.Type.INT), -2090 ) -2091 ) -2092 -2093 def dpipe_sql(self, expression: exp.DPipe) -> str: -2094 return self.binary(expression, "||") -2095 -2096 def div_sql(self, expression: exp.Div) -> str: -2097 return self.binary(expression, "/") -2098 -2099 def overlaps_sql(self, expression: exp.Overlaps) -> str: -2100 return self.binary(expression, "OVERLAPS") -2101 -2102 def distance_sql(self, expression: exp.Distance) -> str: -2103 return self.binary(expression, "<->") +2079 on = self.sql(expression, "on") +2080 on = f" ON {on}" if on else "" +2081 return f"DISTINCT{this}{on}" +2082 +2083 def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str: +2084 return f"{self.sql(expression, 'this')} IGNORE NULLS" +2085 +2086 def respectnulls_sql(self, expression: exp.RespectNulls) -> str: +2087 return f"{self.sql(expression, 'this')} RESPECT NULLS" +2088 +2089 def intdiv_sql(self, expression: exp.IntDiv) -> str: +2090 return self.sql( +2091 exp.Cast( +2092 this=exp.Div(this=expression.this, expression=expression.expression), +2093 to=exp.DataType(this=exp.DataType.Type.INT), +2094 ) +2095 ) +2096 +2097 def dpipe_sql(self, expression: exp.DPipe) -> str: +2098 return self.binary(expression, "||") +2099 +2100 def safedpipe_sql(self, expression: exp.SafeDPipe) -> str: +2101 if self.STRICT_STRING_CONCAT: +2102 return self.func("CONCAT", *(exp.cast(e, "text") for e in expression.flatten())) +2103 return self.dpipe_sql(expression) 2104 -2105 def dot_sql(self, expression: exp.Dot) -> str: -2106 return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}" +2105 def div_sql(self, expression: exp.Div) -> str: +2106 return self.binary(expression, "/") 2107 -2108 def eq_sql(self, expression: exp.EQ) -> str: -2109 return self.binary(expression, "=") +2108 def overlaps_sql(self, expression: exp.Overlaps) -> str: +2109 return self.binary(expression, "OVERLAPS") 2110 -2111 def escape_sql(self, expression: exp.Escape) -> str: -2112 return self.binary(expression, "ESCAPE") +2111 def distance_sql(self, expression: exp.Distance) -> str: +2112 return 
self.binary(expression, "<->") 2113 -2114 def glob_sql(self, expression: exp.Glob) -> str: -2115 return self.binary(expression, "GLOB") +2114 def dot_sql(self, expression: exp.Dot) -> str: +2115 return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}" 2116 -2117 def gt_sql(self, expression: exp.GT) -> str: -2118 return self.binary(expression, ">") +2117 def eq_sql(self, expression: exp.EQ) -> str: +2118 return self.binary(expression, "=") 2119 -2120 def gte_sql(self, expression: exp.GTE) -> str: -2121 return self.binary(expression, ">=") +2120 def escape_sql(self, expression: exp.Escape) -> str: +2121 return self.binary(expression, "ESCAPE") 2122 -2123 def ilike_sql(self, expression: exp.ILike) -> str: -2124 return self.binary(expression, "ILIKE") +2123 def glob_sql(self, expression: exp.Glob) -> str: +2124 return self.binary(expression, "GLOB") 2125 -2126 def ilikeany_sql(self, expression: exp.ILikeAny) -> str: -2127 return self.binary(expression, "ILIKE ANY") +2126 def gt_sql(self, expression: exp.GT) -> str: +2127 return self.binary(expression, ">") 2128 -2129 def is_sql(self, expression: exp.Is) -> str: -2130 return self.binary(expression, "IS") +2129 def gte_sql(self, expression: exp.GTE) -> str: +2130 return self.binary(expression, ">=") 2131 -2132 def like_sql(self, expression: exp.Like) -> str: -2133 return self.binary(expression, "LIKE") +2132 def ilike_sql(self, expression: exp.ILike) -> str: +2133 return self.binary(expression, "ILIKE") 2134 -2135 def likeany_sql(self, expression: exp.LikeAny) -> str: -2136 return self.binary(expression, "LIKE ANY") +2135 def ilikeany_sql(self, expression: exp.ILikeAny) -> str: +2136 return self.binary(expression, "ILIKE ANY") 2137 -2138 def similarto_sql(self, expression: exp.SimilarTo) -> str: -2139 return self.binary(expression, "SIMILAR TO") -2140 -2141 def lt_sql(self, expression: exp.LT) -> str: -2142 return self.binary(expression, "<") -2143 -2144 def lte_sql(self, expression: exp.LTE) -> str: -2145 return self.binary(expression, "<=") -2146 -2147 def mod_sql(self, expression: exp.Mod) -> str: -2148 return self.binary(expression, "%") -2149 -2150 def mul_sql(self, expression: exp.Mul) -> str: -2151 return self.binary(expression, "*") -2152 -2153 def neq_sql(self, expression: exp.NEQ) -> str: -2154 return self.binary(expression, "<>") -2155 -2156 def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str: -2157 return self.binary(expression, "IS NOT DISTINCT FROM") -2158 -2159 def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str: -2160 return self.binary(expression, "IS DISTINCT FROM") -2161 -2162 def or_sql(self, expression: exp.Or) -> str: -2163 return self.connector_sql(expression, "OR") -2164 -2165 def slice_sql(self, expression: exp.Slice) -> str: -2166 return self.binary(expression, ":") -2167 -2168 def sub_sql(self, expression: exp.Sub) -> str: -2169 return self.binary(expression, "-") -2170 -2171 def trycast_sql(self, expression: exp.TryCast) -> str: -2172 return f"TRY_CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})" -2173 -2174 def use_sql(self, expression: exp.Use) -> str: -2175 kind = self.sql(expression, "kind") -2176 kind = f" {kind}" if kind else "" -2177 this = self.sql(expression, "this") -2178 this = f" {this}" if this else "" -2179 return f"USE{kind}{this}" +2138 def is_sql(self, expression: exp.Is) -> str: +2139 if not self.IS_BOOL_ALLOWED and isinstance(expression.expression, exp.Boolean): +2140 return self.sql( +2141 expression.this if expression.expression.this else 
exp.not_(expression.this) +2142 ) +2143 return self.binary(expression, "IS") +2144 +2145 def like_sql(self, expression: exp.Like) -> str: +2146 return self.binary(expression, "LIKE") +2147 +2148 def likeany_sql(self, expression: exp.LikeAny) -> str: +2149 return self.binary(expression, "LIKE ANY") +2150 +2151 def similarto_sql(self, expression: exp.SimilarTo) -> str: +2152 return self.binary(expression, "SIMILAR TO") +2153 +2154 def lt_sql(self, expression: exp.LT) -> str: +2155 return self.binary(expression, "<") +2156 +2157 def lte_sql(self, expression: exp.LTE) -> str: +2158 return self.binary(expression, "<=") +2159 +2160 def mod_sql(self, expression: exp.Mod) -> str: +2161 return self.binary(expression, "%") +2162 +2163 def mul_sql(self, expression: exp.Mul) -> str: +2164 return self.binary(expression, "*") +2165 +2166 def neq_sql(self, expression: exp.NEQ) -> str: +2167 return self.binary(expression, "<>") +2168 +2169 def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str: +2170 return self.binary(expression, "IS NOT DISTINCT FROM") +2171 +2172 def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str: +2173 return self.binary(expression, "IS DISTINCT FROM") +2174 +2175 def or_sql(self, expression: exp.Or) -> str: +2176 return self.connector_sql(expression, "OR") +2177 +2178 def slice_sql(self, expression: exp.Slice) -> str: +2179 return self.binary(expression, ":") 2180 -2181 def binary(self, expression: exp.Binary, op: str) -> str: -2182 op = self.maybe_comment(op, comments=expression.comments) -2183 return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}" -2184 -2185 def function_fallback_sql(self, expression: exp.Func) -> str: -2186 args = [] -2187 for arg_value in expression.args.values(): -2188 if isinstance(arg_value, list): -2189 for value in arg_value: -2190 args.append(value) -2191 else: -2192 args.append(arg_value) +2181 def sub_sql(self, expression: exp.Sub) -> str: +2182 return self.binary(expression, "-") +2183 +2184 def trycast_sql(self, expression: exp.TryCast) -> str: +2185 return f"TRY_CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})" +2186 +2187 def use_sql(self, expression: exp.Use) -> str: +2188 kind = self.sql(expression, "kind") +2189 kind = f" {kind}" if kind else "" +2190 this = self.sql(expression, "this") +2191 this = f" {this}" if this else "" +2192 return f"USE{kind}{this}" 2193 -2194 return self.func(expression.sql_name(), *args) -2195 -2196 def func(self, name: str, *args: t.Optional[exp.Expression | str]) -> str: -2197 return f"{self.normalize_func(name)}({self.format_args(*args)})" -2198 -2199 def format_args(self, *args: t.Optional[str | exp.Expression]) -> str: -2200 arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None) -2201 if self.pretty and self.text_width(arg_sqls) > self._max_text_width: -2202 return self.indent("\n" + f",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True) -2203 return ", ".join(arg_sqls) -2204 -2205 def text_width(self, args: t.Iterable) -> int: -2206 return sum(len(arg) for arg in args) -2207 -2208 def format_time(self, expression: exp.Expression) -> t.Optional[str]: -2209 return format_time(self.sql(expression, "format"), self.time_mapping, self.time_trie) -2210 -2211 def expressions( -2212 self, -2213 expression: t.Optional[exp.Expression] = None, -2214 key: t.Optional[str] = None, -2215 sqls: t.Optional[t.List[str]] = None, -2216 flat: bool = False, -2217 indent: bool = True, -2218 sep: str = ", ", -2219 prefix: str = "", -2220 ) -> str: -2221 
expressions = expression.args.get(key or "expressions") if expression else sqls -2222 -2223 if not expressions: -2224 return "" -2225 -2226 if flat: -2227 return sep.join(self.sql(e) for e in expressions) -2228 -2229 num_sqls = len(expressions) -2230 -2231 # These are calculated once in case we have the leading_comma / pretty option set, correspondingly -2232 pad = " " * self.pad -2233 stripped_sep = sep.strip() -2234 -2235 result_sqls = [] -2236 for i, e in enumerate(expressions): -2237 sql = self.sql(e, comment=False) -2238 comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else "" -2239 -2240 if self.pretty: -2241 if self._leading_comma: -2242 result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}") -2243 else: -2244 result_sqls.append( -2245 f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}" -2246 ) -2247 else: -2248 result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}") +2194 def binary(self, expression: exp.Binary, op: str) -> str: +2195 op = self.maybe_comment(op, comments=expression.comments) +2196 return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}" +2197 +2198 def function_fallback_sql(self, expression: exp.Func) -> str: +2199 args = [] +2200 for arg_value in expression.args.values(): +2201 if isinstance(arg_value, list): +2202 for value in arg_value: +2203 args.append(value) +2204 else: +2205 args.append(arg_value) +2206 +2207 return self.func(expression.sql_name(), *args) +2208 +2209 def func( +2210 self, +2211 name: str, +2212 *args: t.Optional[exp.Expression | str], +2213 prefix: str = "(", +2214 suffix: str = ")", +2215 ) -> str: +2216 return f"{self.normalize_func(name)}{prefix}{self.format_args(*args)}{suffix}" +2217 +2218 def format_args(self, *args: t.Optional[str | exp.Expression]) -> str: +2219 arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None) +2220 if self.pretty and self.text_width(arg_sqls) > self.max_text_width: +2221 return self.indent("\n" + f",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True) +2222 return ", ".join(arg_sqls) +2223 +2224 def text_width(self, args: t.Iterable) -> int: +2225 return sum(len(arg) for arg in args) +2226 +2227 def format_time(self, expression: exp.Expression) -> t.Optional[str]: +2228 return format_time( +2229 self.sql(expression, "format"), self.INVERSE_TIME_MAPPING, self.INVERSE_TIME_TRIE +2230 ) +2231 +2232 def expressions( +2233 self, +2234 expression: t.Optional[exp.Expression] = None, +2235 key: t.Optional[str] = None, +2236 sqls: t.Optional[t.List[str]] = None, +2237 flat: bool = False, +2238 indent: bool = True, +2239 sep: str = ", ", +2240 prefix: str = "", +2241 ) -> str: +2242 expressions = expression.args.get(key or "expressions") if expression else sqls +2243 +2244 if not expressions: +2245 return "" +2246 +2247 if flat: +2248 return sep.join(self.sql(e) for e in expressions) 2249 -2250 result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls) -2251 return self.indent(result_sql, skip_first=False) if indent else result_sql -2252 -2253 def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str: -2254 flat = flat or isinstance(expression.parent, exp.Properties) -2255 expressions_sql = self.expressions(expression, flat=flat) -2256 if flat: -2257 return f"{op} {expressions_sql}" -2258 return f"{self.seg(op)}{self.sep() if expressions_sql else ''}{expressions_sql}" -2259 -2260 def naked_property(self, expression: exp.Property) -> str: -2261 
property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__) -2262 if not property_name: -2263 self.unsupported(f"Unsupported property {expression.__class__.__name__}") -2264 return f"{property_name} {self.sql(expression, 'this')}" -2265 -2266 def set_operation(self, expression: exp.Expression, op: str) -> str: -2267 this = self.sql(expression, "this") -2268 op = self.seg(op) -2269 return self.query_modifiers( -2270 expression, f"{this}{op}{self.sep()}{self.sql(expression, 'expression')}" -2271 ) -2272 -2273 def tag_sql(self, expression: exp.Tag) -> str: -2274 return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}" -2275 -2276 def token_sql(self, token_type: TokenType) -> str: -2277 return self.TOKEN_MAPPING.get(token_type, token_type.name) -2278 -2279 def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str: -2280 this = self.sql(expression, "this") -2281 expressions = self.no_identify(self.expressions, expression) -2282 expressions = ( -2283 self.wrap(expressions) if expression.args.get("wrapped") else f" {expressions}" -2284 ) -2285 return f"{this}{expressions}" +2250 num_sqls = len(expressions) +2251 +2252 # These are calculated once in case we have the leading_comma / pretty option set, correspondingly +2253 pad = " " * self.pad +2254 stripped_sep = sep.strip() +2255 +2256 result_sqls = [] +2257 for i, e in enumerate(expressions): +2258 sql = self.sql(e, comment=False) +2259 comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else "" +2260 +2261 if self.pretty: +2262 if self.leading_comma: +2263 result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}") +2264 else: +2265 result_sqls.append( +2266 f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}" +2267 ) +2268 else: +2269 result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}") +2270 +2271 result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls) +2272 return self.indent(result_sql, skip_first=False) if indent else result_sql +2273 +2274 def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str: +2275 flat = flat or isinstance(expression.parent, exp.Properties) +2276 expressions_sql = self.expressions(expression, flat=flat) +2277 if flat: +2278 return f"{op} {expressions_sql}" +2279 return f"{self.seg(op)}{self.sep() if expressions_sql else ''}{expressions_sql}" +2280 +2281 def naked_property(self, expression: exp.Property) -> str: +2282 property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__) +2283 if not property_name: +2284 self.unsupported(f"Unsupported property {expression.__class__.__name__}") +2285 return f"{property_name} {self.sql(expression, 'this')}" 2286 -2287 def joinhint_sql(self, expression: exp.JoinHint) -> str: +2287 def set_operation(self, expression: exp.Expression, op: str) -> str: 2288 this = self.sql(expression, "this") -2289 expressions = self.expressions(expression, flat=True) -2290 return f"{this}({expressions})" -2291 -2292 def kwarg_sql(self, expression: exp.Kwarg) -> str: -2293 return self.binary(expression, "=>") -2294 -2295 def when_sql(self, expression: exp.When) -> str: -2296 matched = "MATCHED" if expression.args["matched"] else "NOT MATCHED" -2297 source = " BY SOURCE" if self.MATCHED_BY_SOURCE and expression.args.get("source") else "" -2298 condition = self.sql(expression, "condition") -2299 condition = f" AND {condition}" if condition else "" -2300 -2301 then_expression = 
expression.args.get("then") -2302 if isinstance(then_expression, exp.Insert): -2303 then = f"INSERT {self.sql(then_expression, 'this')}" -2304 if "expression" in then_expression.args: -2305 then += f" VALUES {self.sql(then_expression, 'expression')}" -2306 elif isinstance(then_expression, exp.Update): -2307 if isinstance(then_expression.args.get("expressions"), exp.Star): -2308 then = f"UPDATE {self.sql(then_expression, 'expressions')}" -2309 else: -2310 then = f"UPDATE SET {self.expressions(then_expression, flat=True)}" -2311 else: -2312 then = self.sql(then_expression) -2313 return f"WHEN {matched}{source}{condition} THEN {then}" -2314 -2315 def merge_sql(self, expression: exp.Merge) -> str: -2316 this = self.sql(expression, "this") -2317 using = f"USING {self.sql(expression, 'using')}" -2318 on = f"ON {self.sql(expression, 'on')}" -2319 return f"MERGE INTO {this} {using} {on} {self.expressions(expression, sep=' ')}" -2320 -2321 def tochar_sql(self, expression: exp.ToChar) -> str: -2322 if expression.args.get("format"): -2323 self.unsupported("Format argument unsupported for TO_CHAR/TO_VARCHAR function") -2324 -2325 return self.sql(exp.cast(expression.this, "text")) -2326 -2327 def dictproperty_sql(self, expression: exp.DictProperty) -> str: -2328 this = self.sql(expression, "this") -2329 kind = self.sql(expression, "kind") -2330 settings_sql = self.expressions(expression, key="settings", sep=" ") -2331 args = f"({self.sep('')}{settings_sql}{self.seg(')', sep='')}" if settings_sql else "()" -2332 return f"{this}({kind}{args})" -2333 -2334 def dictrange_sql(self, expression: exp.DictRange) -> str: -2335 this = self.sql(expression, "this") -2336 max = self.sql(expression, "max") -2337 min = self.sql(expression, "min") -2338 return f"{this}(MIN {min} MAX {max})" -2339 -2340 def dictsubproperty_sql(self, expression: exp.DictSubProperty) -> str: -2341 return f"{self.sql(expression, 'this')} {self.sql(expression, 'value')}" -2342 -2343 -2344def cached_generator( -2345 cache: t.Optional[t.Dict[int, str]] = None -2346) -> t.Callable[[exp.Expression], str]: -2347 """Returns a cached generator.""" -2348 cache = {} if cache is None else cache -2349 generator = Generator(normalize=True, identify="safe") -2350 return lambda e: generator.generate(e, cache) +2289 op = self.seg(op) +2290 return self.query_modifiers( +2291 expression, f"{this}{op}{self.sep()}{self.sql(expression, 'expression')}" +2292 ) +2293 +2294 def tag_sql(self, expression: exp.Tag) -> str: +2295 return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}" +2296 +2297 def token_sql(self, token_type: TokenType) -> str: +2298 return self.TOKEN_MAPPING.get(token_type, token_type.name) +2299 +2300 def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str: +2301 this = self.sql(expression, "this") +2302 expressions = self.no_identify(self.expressions, expression) +2303 expressions = ( +2304 self.wrap(expressions) if expression.args.get("wrapped") else f" {expressions}" +2305 ) +2306 return f"{this}{expressions}" +2307 +2308 def joinhint_sql(self, expression: exp.JoinHint) -> str: +2309 this = self.sql(expression, "this") +2310 expressions = self.expressions(expression, flat=True) +2311 return f"{this}({expressions})" +2312 +2313 def kwarg_sql(self, expression: exp.Kwarg) -> str: +2314 return self.binary(expression, "=>") +2315 +2316 def when_sql(self, expression: exp.When) -> str: +2317 matched = "MATCHED" if expression.args["matched"] else "NOT MATCHED" +2318 source = " BY 
SOURCE" if self.MATCHED_BY_SOURCE and expression.args.get("source") else "" +2319 condition = self.sql(expression, "condition") +2320 condition = f" AND {condition}" if condition else "" +2321 +2322 then_expression = expression.args.get("then") +2323 if isinstance(then_expression, exp.Insert): +2324 then = f"INSERT {self.sql(then_expression, 'this')}" +2325 if "expression" in then_expression.args: +2326 then += f" VALUES {self.sql(then_expression, 'expression')}" +2327 elif isinstance(then_expression, exp.Update): +2328 if isinstance(then_expression.args.get("expressions"), exp.Star): +2329 then = f"UPDATE {self.sql(then_expression, 'expressions')}" +2330 else: +2331 then = f"UPDATE SET {self.expressions(then_expression, flat=True)}" +2332 else: +2333 then = self.sql(then_expression) +2334 return f"WHEN {matched}{source}{condition} THEN {then}" +2335 +2336 def merge_sql(self, expression: exp.Merge) -> str: +2337 this = self.sql(expression, "this") +2338 using = f"USING {self.sql(expression, 'using')}" +2339 on = f"ON {self.sql(expression, 'on')}" +2340 return f"MERGE INTO {this} {using} {on} {self.expressions(expression, sep=' ')}" +2341 +2342 def tochar_sql(self, expression: exp.ToChar) -> str: +2343 if expression.args.get("format"): +2344 self.unsupported("Format argument unsupported for TO_CHAR/TO_VARCHAR function") +2345 +2346 return self.sql(exp.cast(expression.this, "text")) +2347 +2348 def dictproperty_sql(self, expression: exp.DictProperty) -> str: +2349 this = self.sql(expression, "this") +2350 kind = self.sql(expression, "kind") +2351 settings_sql = self.expressions(expression, key="settings", sep=" ") +2352 args = f"({self.sep('')}{settings_sql}{self.seg(')', sep='')}" if settings_sql else "()" +2353 return f"{this}({kind}{args})" +2354 +2355 def dictrange_sql(self, expression: exp.DictRange) -> str: +2356 this = self.sql(expression, "this") +2357 max = self.sql(expression, "max") +2358 min = self.sql(expression, "min") +2359 return f"{this}(MIN {min} MAX {max})" +2360 +2361 def dictsubproperty_sql(self, expression: exp.DictSubProperty) -> str: +2362 return f"{self.sql(expression, 'this')} {self.sql(expression, 'value')}" +2363 +2364 def oncluster_sql(self, expression: exp.OnCluster) -> str: +2365 return "" +2366 +2367 +2368def cached_generator( +2369 cache: t.Optional[t.Dict[int, str]] = None +2370) -> t.Callable[[exp.Expression], str]: +2371 """Returns a cached generator.""" +2372 cache = {} if cache is None else cache +2373 generator = Generator(normalize=True, identify="safe") +2374 return lambda e: generator.generate(e, cache)
    @@ -3131,1791 +3170,1791 @@
      16class Generator:
       17    """
    -  18    Generator interprets the given syntax tree and produces a SQL string as an output.
    +  18    Generator converts a given syntax tree to the corresponding SQL string.
       19
       20    Args:
    -  21        time_mapping (dict): the dictionary of custom time mappings in which the key
    -  22            represents a python time format and the output the target time format
    -  23        time_trie (trie): a trie of the time_mapping keys
    -  24        pretty (bool): if set to True the returned string will be formatted. Default: False.
    -  25        quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    -  26        quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    -  27        identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    -  28        identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    -  29        bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    -  30        bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    -  31        hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    -  32        hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    -  33        byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    -  34        byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    -  35        raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    -  36        raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    -  37        identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    -  38        normalize (bool): if set to True all identifiers will be lower cased
    -  39        string_escape (str): specifies a string escape character. Default: '.
    -  40        identifier_escape (str): specifies an identifier escape character. Default: ".
    -  41        pad (int): determines padding in a formatted string. Default: 2.
    -  42        indent (int): determines the size of indentation in a formatted string. Default: 4.
    -  43        unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    -  44        normalize_functions (str): normalize function names, "upper", "lower", or None
    -  45            Default: "upper"
    -  46        alias_post_tablesample (bool): if the table alias comes after tablesample
    -  47            Default: False
    -  48        identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit
    -  49            Default: False
    -  50        unsupported_level (ErrorLevel): determines the generator's behavior when it encounters
    -  51            unsupported expressions. Default ErrorLevel.WARN.
    -  52        null_ordering (str): Indicates the default null ordering method to use if not explicitly set.
    -  53            Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
    -  54            Default: "nulls_are_small"
    -  55        max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
    -  56            This is only relevant if unsupported_level is ErrorLevel.RAISE.
    -  57            Default: 3
    -  58        leading_comma (bool): if the comma is leading or trailing in select statements
    -  59            Default: False
    -  60        max_text_width: The max number of characters in a segment before creating new lines in pretty mode.
    -  61            The default is on the smaller end because the length only represents a segment and not the true
    -  62            line length.
    -  63            Default: 80
    -  64        comments: Whether or not to preserve comments in the output SQL code.
    -  65            Default: True
    -  66    """
    -  67
    -  68    TRANSFORMS = {
    -  69        exp.DateAdd: lambda self, e: self.func(
    -  70            "DATE_ADD", e.this, e.expression, exp.Literal.string(e.text("unit"))
    -  71        ),
    -  72        exp.TsOrDsAdd: lambda self, e: self.func(
    -  73            "TS_OR_DS_ADD", e.this, e.expression, exp.Literal.string(e.text("unit"))
    -  74        ),
    -  75        exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]),
    -  76        exp.CharacterSetProperty: lambda self, e: f"{'DEFAULT ' if e.args.get('default') else ''}CHARACTER SET={self.sql(e, 'this')}",
    -  77        exp.ExecuteAsProperty: lambda self, e: self.naked_property(e),
    -  78        exp.ExternalProperty: lambda self, e: "EXTERNAL",
    -  79        exp.LanguageProperty: lambda self, e: self.naked_property(e),
    -  80        exp.LocationProperty: lambda self, e: self.naked_property(e),
    -  81        exp.LogProperty: lambda self, e: f"{'NO ' if e.args.get('no') else ''}LOG",
    -  82        exp.MaterializedProperty: lambda self, e: "MATERIALIZED",
    -  83        exp.NoPrimaryIndexProperty: lambda self, e: "NO PRIMARY INDEX",
    -  84        exp.OnCommitProperty: lambda self, e: f"ON COMMIT {'DELETE' if e.args.get('delete') else 'PRESERVE'} ROWS",
    -  85        exp.ReturnsProperty: lambda self, e: self.naked_property(e),
    -  86        exp.SetProperty: lambda self, e: f"{'MULTI' if e.args.get('multi') else ''}SET",
    -  87        exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}",
    -  88        exp.SqlSecurityProperty: lambda self, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}",
    -  89        exp.TemporaryProperty: lambda self, e: f"TEMPORARY",
    -  90        exp.TransientProperty: lambda self, e: "TRANSIENT",
    -  91        exp.StabilityProperty: lambda self, e: e.name,
    -  92        exp.VolatileProperty: lambda self, e: "VOLATILE",
    -  93        exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}",
    -  94        exp.CaseSpecificColumnConstraint: lambda self, e: f"{'NOT ' if e.args.get('not_') else ''}CASESPECIFIC",
    -  95        exp.CharacterSetColumnConstraint: lambda self, e: f"CHARACTER SET {self.sql(e, 'this')}",
    -  96        exp.DateFormatColumnConstraint: lambda self, e: f"FORMAT {self.sql(e, 'this')}",
    -  97        exp.OnUpdateColumnConstraint: lambda self, e: f"ON UPDATE {self.sql(e, 'this')}",
    -  98        exp.UppercaseColumnConstraint: lambda self, e: f"UPPERCASE",
    -  99        exp.TitleColumnConstraint: lambda self, e: f"TITLE {self.sql(e, 'this')}",
    - 100        exp.PathColumnConstraint: lambda self, e: f"PATH {self.sql(e, 'this')}",
    - 101        exp.CheckColumnConstraint: lambda self, e: f"CHECK ({self.sql(e, 'this')})",
    - 102        exp.CommentColumnConstraint: lambda self, e: f"COMMENT {self.sql(e, 'this')}",
    - 103        exp.CollateColumnConstraint: lambda self, e: f"COLLATE {self.sql(e, 'this')}",
    - 104        exp.EncodeColumnConstraint: lambda self, e: f"ENCODE {self.sql(e, 'this')}",
    - 105        exp.DefaultColumnConstraint: lambda self, e: f"DEFAULT {self.sql(e, 'this')}",
    - 106        exp.InlineLengthColumnConstraint: lambda self, e: f"INLINE LENGTH {self.sql(e, 'this')}",
    - 107    }
    - 108
    - 109    # Whether or not null ordering is supported in order by
    - 110    NULL_ORDERING_SUPPORTED = True
    - 111
    - 112    # Whether or not locking reads (i.e. SELECT ... FOR UPDATE/SHARE) are supported
    - 113    LOCKING_READS_SUPPORTED = False
    - 114
    - 115    # Always do union distinct or union all
    - 116    EXPLICIT_UNION = False
    - 117
    - 118    # Wrap derived values in parens, usually standard but spark doesn't support it
    - 119    WRAP_DERIVED_VALUES = True
    - 120
    - 121    # Whether or not create function uses an AS before the RETURN
    - 122    CREATE_FUNCTION_RETURN_AS = True
    - 123
    - 124    # Whether or not MERGE ... WHEN MATCHED BY SOURCE is allowed
    - 125    MATCHED_BY_SOURCE = True
    - 126
    - 127    # Whether or not the INTERVAL expression works only with values like '1 day'
    - 128    SINGLE_STRING_INTERVAL = False
    - 129
    - 130    # Whether or not the plural form of date parts like day (i.e. "days") is supported in INTERVALs
    - 131    INTERVAL_ALLOWS_PLURAL_FORM = True
    - 132
    - 133    # Whether or not the TABLESAMPLE clause supports a method name, like BERNOULLI
    - 134    TABLESAMPLE_WITH_METHOD = True
    - 135
    - 136    # Whether or not to treat the number in TABLESAMPLE (50) as a percentage
    - 137    TABLESAMPLE_SIZE_IS_PERCENT = False
    - 138
    - 139    # Whether or not limit and fetch are supported (possible values: "ALL", "LIMIT", "FETCH")
    - 140    LIMIT_FETCH = "ALL"
    - 141
    - 142    # Whether a table is allowed to be renamed with a db
    - 143    RENAME_TABLE_WITH_DB = True
    - 144
    - 145    # The separator for grouping sets and rollups
    - 146    GROUPINGS_SEP = ","
    - 147
    - 148    # The string used for creating index on a table
    - 149    INDEX_ON = "ON"
    - 150
    - 151    TYPE_MAPPING = {
    - 152        exp.DataType.Type.NCHAR: "CHAR",
    - 153        exp.DataType.Type.NVARCHAR: "VARCHAR",
    - 154        exp.DataType.Type.MEDIUMTEXT: "TEXT",
    - 155        exp.DataType.Type.LONGTEXT: "TEXT",
    - 156        exp.DataType.Type.MEDIUMBLOB: "BLOB",
    - 157        exp.DataType.Type.LONGBLOB: "BLOB",
    - 158        exp.DataType.Type.INET: "INET",
    +  21        pretty: Whether or not to format the produced SQL string.
    +  22            Default: False.
    +  23        identify: Determines when an identifier should be quoted. Possible values are:
    +  24            False (default): Never quote, except in cases where it's mandatory by the dialect.
    +  25            True or 'always': Always quote.
    +  26            'safe': Only quote identifiers that are case insensitive.
    +  27        normalize: Whether or not to normalize identifiers to lowercase.
    +  28            Default: False.
    +  29        pad: Determines the pad size in a formatted string.
    +  30            Default: 2.
    +  31        indent: Determines the indentation size in a formatted string.
    +  32            Default: 2.
    +  33        normalize_functions: Whether or not to normalize all function names. Possible values are:
    +  34            "upper" or True (default): Convert names to uppercase.
    +  35            "lower": Convert names to lowercase.
    +  36            False: Disables function name normalization.
    +  37        unsupported_level: Determines the generator's behavior when it encounters unsupported expressions.
    +  38            Default ErrorLevel.WARN.
    +  39        max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError.
    +  40            This is only relevant if unsupported_level is ErrorLevel.RAISE.
    +  41            Default: 3
    +  42        leading_comma: Determines whether or not the comma is leading or trailing in select expressions.
    +  43            This is only relevant when generating in pretty mode.
    +  44            Default: False
    +  45        max_text_width: The max number of characters in a segment before creating new lines in pretty mode.
    +  46            The default is on the smaller end because the length only represents a segment and not the true
    +  47            line length.
    +  48            Default: 80
    +  49        comments: Whether or not to preserve comments in the output SQL code.
    +  50            Default: True
    +  51    """
    +  52
    +  53    TRANSFORMS = {
    +  54        exp.DateAdd: lambda self, e: self.func(
    +  55            "DATE_ADD", e.this, e.expression, exp.Literal.string(e.text("unit"))
    +  56        ),
    +  57        exp.TsOrDsAdd: lambda self, e: self.func(
    +  58            "TS_OR_DS_ADD", e.this, e.expression, exp.Literal.string(e.text("unit"))
    +  59        ),
    +  60        exp.VarMap: lambda self, e: self.func("MAP", e.args["keys"], e.args["values"]),
    +  61        exp.CharacterSetProperty: lambda self, e: f"{'DEFAULT ' if e.args.get('default') else ''}CHARACTER SET={self.sql(e, 'this')}",
    +  62        exp.ExecuteAsProperty: lambda self, e: self.naked_property(e),
    +  63        exp.ExternalProperty: lambda self, e: "EXTERNAL",
    +  64        exp.LanguageProperty: lambda self, e: self.naked_property(e),
    +  65        exp.LocationProperty: lambda self, e: self.naked_property(e),
    +  66        exp.LogProperty: lambda self, e: f"{'NO ' if e.args.get('no') else ''}LOG",
    +  67        exp.MaterializedProperty: lambda self, e: "MATERIALIZED",
    +  68        exp.NoPrimaryIndexProperty: lambda self, e: "NO PRIMARY INDEX",
    +  69        exp.OnCommitProperty: lambda self, e: f"ON COMMIT {'DELETE' if e.args.get('delete') else 'PRESERVE'} ROWS",
    +  70        exp.ReturnsProperty: lambda self, e: self.naked_property(e),
    +  71        exp.SetProperty: lambda self, e: f"{'MULTI' if e.args.get('multi') else ''}SET",
    +  72        exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}",
    +  73        exp.SqlSecurityProperty: lambda self, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}",
    +  74        exp.TemporaryProperty: lambda self, e: f"TEMPORARY",
    +  75        exp.ToTableProperty: lambda self, e: f"TO {self.sql(e.this)}",
    +  76        exp.TransientProperty: lambda self, e: "TRANSIENT",
    +  77        exp.StabilityProperty: lambda self, e: e.name,
    +  78        exp.VolatileProperty: lambda self, e: "VOLATILE",
    +  79        exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}",
    +  80        exp.CaseSpecificColumnConstraint: lambda self, e: f"{'NOT ' if e.args.get('not_') else ''}CASESPECIFIC",
    +  81        exp.CharacterSetColumnConstraint: lambda self, e: f"CHARACTER SET {self.sql(e, 'this')}",
    +  82        exp.DateFormatColumnConstraint: lambda self, e: f"FORMAT {self.sql(e, 'this')}",
    +  83        exp.OnUpdateColumnConstraint: lambda self, e: f"ON UPDATE {self.sql(e, 'this')}",
    +  84        exp.UppercaseColumnConstraint: lambda self, e: f"UPPERCASE",
    +  85        exp.TitleColumnConstraint: lambda self, e: f"TITLE {self.sql(e, 'this')}",
    +  86        exp.PathColumnConstraint: lambda self, e: f"PATH {self.sql(e, 'this')}",
    +  87        exp.CheckColumnConstraint: lambda self, e: f"CHECK ({self.sql(e, 'this')})",
    +  88        exp.CommentColumnConstraint: lambda self, e: f"COMMENT {self.sql(e, 'this')}",
    +  89        exp.CollateColumnConstraint: lambda self, e: f"COLLATE {self.sql(e, 'this')}",
    +  90        exp.EncodeColumnConstraint: lambda self, e: f"ENCODE {self.sql(e, 'this')}",
    +  91        exp.DefaultColumnConstraint: lambda self, e: f"DEFAULT {self.sql(e, 'this')}",
    +  92        exp.InlineLengthColumnConstraint: lambda self, e: f"INLINE LENGTH {self.sql(e, 'this')}",
    +  93    }
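
TRANSFORMS maps an expression class either to a plain SQL string or to a callable taking (generator, expression); sql() consults it before falling back to the per-node handler methods. A hypothetical override sketch (MyGenerator and the TEMP spelling are illustrative, not part of this patch):

    from sqlglot import exp
    from sqlglot.generator import Generator

    class MyGenerator(Generator):
        TRANSFORMS = {
            **Generator.TRANSFORMS,
            # Callable form: receives the generator instance and the expression node.
            exp.TemporaryProperty: lambda self, e: "TEMP",
        }
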
    +  94
    +  95    # Whether or not null ordering is supported in order by
    +  96    NULL_ORDERING_SUPPORTED = True
    +  97
    +  98    # Whether or not locking reads (i.e. SELECT ... FOR UPDATE/SHARE) are supported
    +  99    LOCKING_READS_SUPPORTED = False
    + 100
    + 101    # Always do union distinct or union all
    + 102    EXPLICIT_UNION = False
    + 103
    + 104    # Wrap derived values in parens, usually standard but spark doesn't support it
    + 105    WRAP_DERIVED_VALUES = True
    + 106
    + 107    # Whether or not create function uses an AS before the RETURN
    + 108    CREATE_FUNCTION_RETURN_AS = True
    + 109
    + 110    # Whether or not MERGE ... WHEN MATCHED BY SOURCE is allowed
    + 111    MATCHED_BY_SOURCE = True
    + 112
    + 113    # Whether or not the INTERVAL expression works only with values like '1 day'
    + 114    SINGLE_STRING_INTERVAL = False
    + 115
    + 116    # Whether or not the plural form of date parts like day (i.e. "days") is supported in INTERVALs
    + 117    INTERVAL_ALLOWS_PLURAL_FORM = True
    + 118
    + 119    # Whether or not the TABLESAMPLE clause supports a method name, like BERNOULLI
    + 120    TABLESAMPLE_WITH_METHOD = True
    + 121
    + 122    # Whether or not to treat the number in TABLESAMPLE (50) as a percentage
    + 123    TABLESAMPLE_SIZE_IS_PERCENT = False
    + 124
    + 125    # Whether or not limit and fetch are supported (possible values: "ALL", "LIMIT", "FETCH")
    + 126    LIMIT_FETCH = "ALL"
    + 127
    + 128    # Whether or not a table is allowed to be renamed with a db
    + 129    RENAME_TABLE_WITH_DB = True
    + 130
    + 131    # The separator for grouping sets and rollups
    + 132    GROUPINGS_SEP = ","
    + 133
    + 134    # The string used for creating an index on a table
    + 135    INDEX_ON = "ON"
    + 136
    + 137    # Whether or not join hints should be generated
    + 138    JOIN_HINTS = True
    + 139
    + 140    # Whether or not table hints should be generated
    + 141    TABLE_HINTS = True
    + 142
    + 143    # Whether or not comparing against booleans (e.g. x IS TRUE) is supported
    + 144    IS_BOOL_ALLOWED = True
    + 145
    + 146    TYPE_MAPPING = {
    + 147        exp.DataType.Type.NCHAR: "CHAR",
    + 148        exp.DataType.Type.NVARCHAR: "VARCHAR",
    + 149        exp.DataType.Type.MEDIUMTEXT: "TEXT",
    + 150        exp.DataType.Type.LONGTEXT: "TEXT",
    + 151        exp.DataType.Type.MEDIUMBLOB: "BLOB",
    + 152        exp.DataType.Type.LONGBLOB: "BLOB",
    + 153        exp.DataType.Type.INET: "INET",
    + 154    }
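
The class-level flags above (NULL_ORDERING_SUPPORTED through IS_BOOL_ALLOWED) and TYPE_MAPPING are the knobs a dialect generator overrides to change the emitted SQL without touching individual *_sql methods. A hypothetical sketch (the subclass and the specific choices are illustrative only):

    from sqlglot import exp
    from sqlglot.generator import Generator

    class MyDialectGenerator(Generator):
        IS_BOOL_ALLOWED = False   # declare that `x IS TRUE` style comparisons are unsupported
        LIMIT_FETCH = "LIMIT"     # only LIMIT is available, per the comment on LIMIT_FETCH above
        TYPE_MAPPING = {
            **Generator.TYPE_MAPPING,
            exp.DataType.Type.TINYINT: "SMALLINT",
        }
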
    + 155
    + 156    STAR_MAPPING = {
    + 157        "except": "EXCEPT",
    + 158        "replace": "REPLACE",
      159    }
      160
    - 161    STAR_MAPPING = {
    - 162        "except": "EXCEPT",
    - 163        "replace": "REPLACE",
    - 164    }
    - 165
    - 166    TIME_PART_SINGULARS = {
    - 167        "microseconds": "microsecond",
    - 168        "seconds": "second",
    - 169        "minutes": "minute",
    - 170        "hours": "hour",
    - 171        "days": "day",
    - 172        "weeks": "week",
    - 173        "months": "month",
    - 174        "quarters": "quarter",
    - 175        "years": "year",
    - 176    }
    - 177
    - 178    TOKEN_MAPPING: t.Dict[TokenType, str] = {}
    - 179
    - 180    STRUCT_DELIMITER = ("<", ">")
    - 181
    - 182    PARAMETER_TOKEN = "@"
    - 183
    - 184    PROPERTIES_LOCATION = {
    - 185        exp.AlgorithmProperty: exp.Properties.Location.POST_CREATE,
    - 186        exp.AutoIncrementProperty: exp.Properties.Location.POST_SCHEMA,
    - 187        exp.BlockCompressionProperty: exp.Properties.Location.POST_NAME,
    - 188        exp.CharacterSetProperty: exp.Properties.Location.POST_SCHEMA,
    - 189        exp.ChecksumProperty: exp.Properties.Location.POST_NAME,
    - 190        exp.CollateProperty: exp.Properties.Location.POST_SCHEMA,
    - 191        exp.Cluster: exp.Properties.Location.POST_SCHEMA,
    - 192        exp.DataBlocksizeProperty: exp.Properties.Location.POST_NAME,
    - 193        exp.DefinerProperty: exp.Properties.Location.POST_CREATE,
    - 194        exp.DictRange: exp.Properties.Location.POST_SCHEMA,
    - 195        exp.DictProperty: exp.Properties.Location.POST_SCHEMA,
    - 196        exp.DistKeyProperty: exp.Properties.Location.POST_SCHEMA,
    - 197        exp.DistStyleProperty: exp.Properties.Location.POST_SCHEMA,
    - 198        exp.EngineProperty: exp.Properties.Location.POST_SCHEMA,
    - 199        exp.ExecuteAsProperty: exp.Properties.Location.POST_SCHEMA,
    - 200        exp.ExternalProperty: exp.Properties.Location.POST_CREATE,
    - 201        exp.FallbackProperty: exp.Properties.Location.POST_NAME,
    - 202        exp.FileFormatProperty: exp.Properties.Location.POST_WITH,
    - 203        exp.FreespaceProperty: exp.Properties.Location.POST_NAME,
    - 204        exp.IsolatedLoadingProperty: exp.Properties.Location.POST_NAME,
    - 205        exp.JournalProperty: exp.Properties.Location.POST_NAME,
    - 206        exp.LanguageProperty: exp.Properties.Location.POST_SCHEMA,
    - 207        exp.LikeProperty: exp.Properties.Location.POST_SCHEMA,
    - 208        exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,
    - 209        exp.LockingProperty: exp.Properties.Location.POST_ALIAS,
    - 210        exp.LogProperty: exp.Properties.Location.POST_NAME,
    - 211        exp.MaterializedProperty: exp.Properties.Location.POST_CREATE,
    - 212        exp.MergeBlockRatioProperty: exp.Properties.Location.POST_NAME,
    - 213        exp.NoPrimaryIndexProperty: exp.Properties.Location.POST_EXPRESSION,
    - 214        exp.OnCommitProperty: exp.Properties.Location.POST_EXPRESSION,
    - 215        exp.Order: exp.Properties.Location.POST_SCHEMA,
    - 216        exp.PartitionedByProperty: exp.Properties.Location.POST_WITH,
    - 217        exp.PrimaryKey: exp.Properties.Location.POST_SCHEMA,
    - 218        exp.Property: exp.Properties.Location.POST_WITH,
    - 219        exp.ReturnsProperty: exp.Properties.Location.POST_SCHEMA,
    - 220        exp.RowFormatProperty: exp.Properties.Location.POST_SCHEMA,
    - 221        exp.RowFormatDelimitedProperty: exp.Properties.Location.POST_SCHEMA,
    - 222        exp.RowFormatSerdeProperty: exp.Properties.Location.POST_SCHEMA,
    - 223        exp.SchemaCommentProperty: exp.Properties.Location.POST_SCHEMA,
    - 224        exp.SerdeProperties: exp.Properties.Location.POST_SCHEMA,
    - 225        exp.Set: exp.Properties.Location.POST_SCHEMA,
    - 226        exp.SettingsProperty: exp.Properties.Location.POST_SCHEMA,
    - 227        exp.SetProperty: exp.Properties.Location.POST_CREATE,
    - 228        exp.SortKeyProperty: exp.Properties.Location.POST_SCHEMA,
    - 229        exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE,
    - 230        exp.StabilityProperty: exp.Properties.Location.POST_SCHEMA,
    - 231        exp.TemporaryProperty: exp.Properties.Location.POST_CREATE,
    - 232        exp.TransientProperty: exp.Properties.Location.POST_CREATE,
    - 233        exp.MergeTreeTTL: exp.Properties.Location.POST_SCHEMA,
    - 234        exp.VolatileProperty: exp.Properties.Location.POST_CREATE,
    - 235        exp.WithDataProperty: exp.Properties.Location.POST_EXPRESSION,
    - 236        exp.WithJournalTableProperty: exp.Properties.Location.POST_NAME,
    - 237    }
    - 238
    - 239    JOIN_HINTS = True
    - 240    TABLE_HINTS = True
    - 241
    - 242    RESERVED_KEYWORDS: t.Set[str] = set()
    - 243    WITH_SEPARATED_COMMENTS = (exp.Select, exp.From, exp.Where, exp.With)
    - 244    UNWRAPPED_INTERVAL_VALUES = (exp.Column, exp.Literal, exp.Neg, exp.Paren)
    + 161    TIME_PART_SINGULARS = {
    + 162        "microseconds": "microsecond",
    + 163        "seconds": "second",
    + 164        "minutes": "minute",
    + 165        "hours": "hour",
    + 166        "days": "day",
    + 167        "weeks": "week",
    + 168        "months": "month",
    + 169        "quarters": "quarter",
    + 170        "years": "year",
    + 171    }
    + 172
    + 173    TOKEN_MAPPING: t.Dict[TokenType, str] = {}
    + 174
    + 175    STRUCT_DELIMITER = ("<", ">")
    + 176
    + 177    PARAMETER_TOKEN = "@"
    + 178
    + 179    PROPERTIES_LOCATION = {
    + 180        exp.AlgorithmProperty: exp.Properties.Location.POST_CREATE,
    + 181        exp.AutoIncrementProperty: exp.Properties.Location.POST_SCHEMA,
    + 182        exp.BlockCompressionProperty: exp.Properties.Location.POST_NAME,
    + 183        exp.CharacterSetProperty: exp.Properties.Location.POST_SCHEMA,
    + 184        exp.ChecksumProperty: exp.Properties.Location.POST_NAME,
    + 185        exp.CollateProperty: exp.Properties.Location.POST_SCHEMA,
    + 186        exp.Cluster: exp.Properties.Location.POST_SCHEMA,
    + 187        exp.DataBlocksizeProperty: exp.Properties.Location.POST_NAME,
    + 188        exp.DefinerProperty: exp.Properties.Location.POST_CREATE,
    + 189        exp.DictRange: exp.Properties.Location.POST_SCHEMA,
    + 190        exp.DictProperty: exp.Properties.Location.POST_SCHEMA,
    + 191        exp.DistKeyProperty: exp.Properties.Location.POST_SCHEMA,
    + 192        exp.DistStyleProperty: exp.Properties.Location.POST_SCHEMA,
    + 193        exp.EngineProperty: exp.Properties.Location.POST_SCHEMA,
    + 194        exp.ExecuteAsProperty: exp.Properties.Location.POST_SCHEMA,
    + 195        exp.ExternalProperty: exp.Properties.Location.POST_CREATE,
    + 196        exp.FallbackProperty: exp.Properties.Location.POST_NAME,
    + 197        exp.FileFormatProperty: exp.Properties.Location.POST_WITH,
    + 198        exp.FreespaceProperty: exp.Properties.Location.POST_NAME,
    + 199        exp.IsolatedLoadingProperty: exp.Properties.Location.POST_NAME,
    + 200        exp.JournalProperty: exp.Properties.Location.POST_NAME,
    + 201        exp.LanguageProperty: exp.Properties.Location.POST_SCHEMA,
    + 202        exp.LikeProperty: exp.Properties.Location.POST_SCHEMA,
    + 203        exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,
    + 204        exp.LockingProperty: exp.Properties.Location.POST_ALIAS,
    + 205        exp.LogProperty: exp.Properties.Location.POST_NAME,
    + 206        exp.MaterializedProperty: exp.Properties.Location.POST_CREATE,
    + 207        exp.MergeBlockRatioProperty: exp.Properties.Location.POST_NAME,
    + 208        exp.NoPrimaryIndexProperty: exp.Properties.Location.POST_EXPRESSION,
    + 209        exp.OnCommitProperty: exp.Properties.Location.POST_EXPRESSION,
    + 210        exp.Order: exp.Properties.Location.POST_SCHEMA,
    + 211        exp.PartitionedByProperty: exp.Properties.Location.POST_WITH,
    + 212        exp.PrimaryKey: exp.Properties.Location.POST_SCHEMA,
    + 213        exp.Property: exp.Properties.Location.POST_WITH,
    + 214        exp.ReturnsProperty: exp.Properties.Location.POST_SCHEMA,
    + 215        exp.RowFormatProperty: exp.Properties.Location.POST_SCHEMA,
    + 216        exp.RowFormatDelimitedProperty: exp.Properties.Location.POST_SCHEMA,
    + 217        exp.RowFormatSerdeProperty: exp.Properties.Location.POST_SCHEMA,
    + 218        exp.SchemaCommentProperty: exp.Properties.Location.POST_SCHEMA,
    + 219        exp.SerdeProperties: exp.Properties.Location.POST_SCHEMA,
    + 220        exp.Set: exp.Properties.Location.POST_SCHEMA,
    + 221        exp.SettingsProperty: exp.Properties.Location.POST_SCHEMA,
    + 222        exp.SetProperty: exp.Properties.Location.POST_CREATE,
    + 223        exp.SortKeyProperty: exp.Properties.Location.POST_SCHEMA,
    + 224        exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE,
    + 225        exp.StabilityProperty: exp.Properties.Location.POST_SCHEMA,
    + 226        exp.TemporaryProperty: exp.Properties.Location.POST_CREATE,
    + 227        exp.ToTableProperty: exp.Properties.Location.POST_SCHEMA,
    + 228        exp.TransientProperty: exp.Properties.Location.POST_CREATE,
    + 229        exp.MergeTreeTTL: exp.Properties.Location.POST_SCHEMA,
    + 230        exp.VolatileProperty: exp.Properties.Location.POST_CREATE,
    + 231        exp.WithDataProperty: exp.Properties.Location.POST_EXPRESSION,
    + 232        exp.WithJournalTableProperty: exp.Properties.Location.POST_NAME,
    + 233    }
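
PROPERTIES_LOCATION decides where each property class is rendered inside a CREATE statement (before the name, after the schema, in the WITH block, and so on). Dialects typically extend it the same way they extend TRANSFORMS; a hypothetical sketch:

    from sqlglot import exp
    from sqlglot.generator import Generator

    class MyPropertiesGenerator(Generator):
        PROPERTIES_LOCATION = {
            **Generator.PROPERTIES_LOCATION,
            # Hypothetical: render PARTITIONED BY after the column schema instead of in WITH (...).
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
        }
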
    + 234
    + 235    # Keywords that can't be used as unquoted identifier names
    + 236    RESERVED_KEYWORDS: t.Set[str] = set()
    + 237
    + 238    # Expressions whose comments are separated from them for better formatting
    + 239    WITH_SEPARATED_COMMENTS: t.Tuple[t.Type[exp.Expression], ...] = (
    + 240        exp.Select,
    + 241        exp.From,
    + 242        exp.Where,
    + 243        exp.With,
    + 244    )
      245
    - 246    SENTINEL_LINE_BREAK = "__SQLGLOT__LB__"
    - 247
    - 248    __slots__ = (
    - 249        "time_mapping",
    - 250        "time_trie",
    - 251        "pretty",
    - 252        "quote_start",
    - 253        "quote_end",
    - 254        "identifier_start",
    - 255        "identifier_end",
    - 256        "bit_start",
    - 257        "bit_end",
    - 258        "hex_start",
    - 259        "hex_end",
    - 260        "byte_start",
    - 261        "byte_end",
    - 262        "raw_start",
    - 263        "raw_end",
    - 264        "identify",
    - 265        "normalize",
    - 266        "string_escape",
    - 267        "identifier_escape",
    - 268        "pad",
    - 269        "index_offset",
    - 270        "unnest_column_only",
    - 271        "alias_post_tablesample",
    - 272        "identifiers_can_start_with_digit",
    - 273        "normalize_functions",
    - 274        "unsupported_level",
    - 275        "unsupported_messages",
    - 276        "null_ordering",
    - 277        "max_unsupported",
    - 278        "_indent",
    - 279        "_escaped_quote_end",
    - 280        "_escaped_identifier_end",
    - 281        "_leading_comma",
    - 282        "_max_text_width",
    - 283        "_comments",
    - 284        "_cache",
    - 285    )
    - 286
    - 287    def __init__(
    - 288        self,
    - 289        time_mapping=None,
    - 290        time_trie=None,
    - 291        pretty=None,
    - 292        quote_start=None,
    - 293        quote_end=None,
    - 294        identifier_start=None,
    - 295        identifier_end=None,
    - 296        bit_start=None,
    - 297        bit_end=None,
    - 298        hex_start=None,
    - 299        hex_end=None,
    - 300        byte_start=None,
    - 301        byte_end=None,
    - 302        raw_start=None,
    - 303        raw_end=None,
    - 304        identify=False,
    - 305        normalize=False,
    - 306        string_escape=None,
    - 307        identifier_escape=None,
    - 308        pad=2,
    - 309        indent=2,
    - 310        index_offset=0,
    - 311        unnest_column_only=False,
    - 312        alias_post_tablesample=False,
    - 313        identifiers_can_start_with_digit=False,
    - 314        normalize_functions="upper",
    - 315        unsupported_level=ErrorLevel.WARN,
    - 316        null_ordering=None,
    - 317        max_unsupported=3,
    - 318        leading_comma=False,
    - 319        max_text_width=80,
    - 320        comments=True,
    - 321    ):
    - 322        import sqlglot
    - 323
    - 324        self.time_mapping = time_mapping or {}
    - 325        self.time_trie = time_trie
    - 326        self.pretty = pretty if pretty is not None else sqlglot.pretty
    - 327        self.quote_start = quote_start or "'"
    - 328        self.quote_end = quote_end or "'"
    - 329        self.identifier_start = identifier_start or '"'
    - 330        self.identifier_end = identifier_end or '"'
    - 331        self.bit_start = bit_start
    - 332        self.bit_end = bit_end
    - 333        self.hex_start = hex_start
    - 334        self.hex_end = hex_end
    - 335        self.byte_start = byte_start
    - 336        self.byte_end = byte_end
    - 337        self.raw_start = raw_start
    - 338        self.raw_end = raw_end
    - 339        self.identify = identify
    - 340        self.normalize = normalize
    - 341        self.string_escape = string_escape or "'"
    - 342        self.identifier_escape = identifier_escape or '"'
    - 343        self.pad = pad
    - 344        self.index_offset = index_offset
    - 345        self.unnest_column_only = unnest_column_only
    - 346        self.alias_post_tablesample = alias_post_tablesample
    - 347        self.identifiers_can_start_with_digit = identifiers_can_start_with_digit
    - 348        self.normalize_functions = normalize_functions
    - 349        self.unsupported_level = unsupported_level
    - 350        self.unsupported_messages = []
    - 351        self.max_unsupported = max_unsupported
    - 352        self.null_ordering = null_ordering
    - 353        self._indent = indent
    - 354        self._escaped_quote_end = self.string_escape + self.quote_end
    - 355        self._escaped_identifier_end = self.identifier_escape + self.identifier_end
    - 356        self._leading_comma = leading_comma
    - 357        self._max_text_width = max_text_width
    - 358        self._comments = comments
    - 359        self._cache = None
    - 360
    - 361    def generate(
    - 362        self,
    - 363        expression: t.Optional[exp.Expression],
    - 364        cache: t.Optional[t.Dict[int, str]] = None,
    - 365    ) -> str:
    - 366        """
    - 367        Generates a SQL string by interpreting the given syntax tree.
    - 368
    - 369        Args
    - 370            expression: the syntax tree.
    - 371            cache: an optional sql string cache. this leverages the hash of an expression which is slow, so only use this if you set _hash on each node.
    - 372
    - 373        Returns
    - 374            the SQL string.
    - 375        """
    - 376        if cache is not None:
    - 377            self._cache = cache
    - 378        self.unsupported_messages = []
    - 379        sql = self.sql(expression).strip()
    - 380        self._cache = None
    - 381
    - 382        if self.unsupported_level == ErrorLevel.IGNORE:
    - 383            return sql
    - 384
    - 385        if self.unsupported_level == ErrorLevel.WARN:
    - 386            for msg in self.unsupported_messages:
    - 387                logger.warning(msg)
    - 388        elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages:
    - 389            raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported))
    - 390
    - 391        if self.pretty:
    - 392            sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n")
    - 393        return sql
    - 394
    - 395    def unsupported(self, message: str) -> None:
    - 396        if self.unsupported_level == ErrorLevel.IMMEDIATE:
    - 397            raise UnsupportedError(message)
    - 398        self.unsupported_messages.append(message)
    - 399
    - 400    def sep(self, sep: str = " ") -> str:
    - 401        return f"{sep.strip()}\n" if self.pretty else sep
    - 402
    - 403    def seg(self, sql: str, sep: str = " ") -> str:
    - 404        return f"{self.sep(sep)}{sql}"
    - 405
    - 406    def pad_comment(self, comment: str) -> str:
    - 407        comment = " " + comment if comment[0].strip() else comment
    - 408        comment = comment + " " if comment[-1].strip() else comment
    - 409        return comment
    - 410
    - 411    def maybe_comment(
    - 412        self,
    - 413        sql: str,
    - 414        expression: t.Optional[exp.Expression] = None,
    - 415        comments: t.Optional[t.List[str]] = None,
    - 416    ) -> str:
    - 417        comments = ((expression and expression.comments) if comments is None else comments) if self._comments else None  # type: ignore
    - 418
    - 419        if not comments or isinstance(expression, exp.Binary):
    - 420            return sql
    + 246    # Expressions that can remain unwrapped when appearing in the context of an INTERVAL
    + 247    UNWRAPPED_INTERVAL_VALUES: t.Tuple[t.Type[exp.Expression], ...] = (
    + 248        exp.Column,
    + 249        exp.Literal,
    + 250        exp.Neg,
    + 251        exp.Paren,
    + 252    )
    + 253
    + 254    SENTINEL_LINE_BREAK = "__SQLGLOT__LB__"
    + 255
    + 256    # Autofilled
    + 257    INVERSE_TIME_MAPPING: t.Dict[str, str] = {}
    + 258    INVERSE_TIME_TRIE: t.Dict = {}
    + 259    INDEX_OFFSET = 0
    + 260    UNNEST_COLUMN_ONLY = False
    + 261    ALIAS_POST_TABLESAMPLE = False
    + 262    IDENTIFIERS_CAN_START_WITH_DIGIT = False
    + 263    STRICT_STRING_CONCAT = False
    + 264    NORMALIZE_FUNCTIONS: bool | str = "upper"
    + 265    NULL_ORDERING = "nulls_are_small"
    + 266
    + 267    # Delimiters for quotes, identifiers and the corresponding escape characters
    + 268    QUOTE_START = "'"
    + 269    QUOTE_END = "'"
    + 270    IDENTIFIER_START = '"'
    + 271    IDENTIFIER_END = '"'
    + 272    STRING_ESCAPE = "'"
    + 273    IDENTIFIER_ESCAPE = '"'
    + 274
    + 275    # Delimiters for bit, hex, byte and raw literals
    + 276    BIT_START: t.Optional[str] = None
    + 277    BIT_END: t.Optional[str] = None
    + 278    HEX_START: t.Optional[str] = None
    + 279    HEX_END: t.Optional[str] = None
    + 280    BYTE_START: t.Optional[str] = None
    + 281    BYTE_END: t.Optional[str] = None
    + 282    RAW_START: t.Optional[str] = None
    + 283    RAW_END: t.Optional[str] = None
    + 284
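
The quote, identifier and literal delimiters are now plain class attributes; in a full dialect they are normally derived from the dialect/tokenizer configuration, but a standalone subclass can also set them directly. A hypothetical sketch of backtick-quoted identifiers (illustrative, not the actual MySQL-style generator):

    from sqlglot.generator import Generator

    class BacktickGenerator(Generator):
        IDENTIFIER_START = "`"
        IDENTIFIER_END = "`"
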
    + 285    __slots__ = (
    + 286        "pretty",
    + 287        "identify",
    + 288        "normalize",
    + 289        "pad",
    + 290        "_indent",
    + 291        "normalize_functions",
    + 292        "unsupported_level",
    + 293        "max_unsupported",
    + 294        "leading_comma",
    + 295        "max_text_width",
    + 296        "comments",
    + 297        "unsupported_messages",
    + 298        "_escaped_quote_end",
    + 299        "_escaped_identifier_end",
    + 300        "_cache",
    + 301    )
    + 302
    + 303    def __init__(
    + 304        self,
    + 305        pretty: t.Optional[bool] = None,
    + 306        identify: str | bool = False,
    + 307        normalize: bool = False,
    + 308        pad: int = 2,
    + 309        indent: int = 2,
    + 310        normalize_functions: t.Optional[str | bool] = None,
    + 311        unsupported_level: ErrorLevel = ErrorLevel.WARN,
    + 312        max_unsupported: int = 3,
    + 313        leading_comma: bool = False,
    + 314        max_text_width: int = 80,
    + 315        comments: bool = True,
    + 316    ):
    + 317        import sqlglot
    + 318
    + 319        self.pretty = pretty if pretty is not None else sqlglot.pretty
    + 320        self.identify = identify
    + 321        self.normalize = normalize
    + 322        self.pad = pad
    + 323        self._indent = indent
    + 324        self.unsupported_level = unsupported_level
    + 325        self.max_unsupported = max_unsupported
    + 326        self.leading_comma = leading_comma
    + 327        self.max_text_width = max_text_width
    + 328        self.comments = comments
    + 329
    + 330        # This is both a Dialect property and a Generator argument, so we prioritize the latter
    + 331        self.normalize_functions = (
    + 332            self.NORMALIZE_FUNCTIONS if normalize_functions is None else normalize_functions
    + 333        )
    + 334
    + 335        self.unsupported_messages: t.List[str] = []
    + 336        self._escaped_quote_end: str = self.STRING_ESCAPE + self.QUOTE_END
    + 337        self._escaped_identifier_end: str = self.IDENTIFIER_ESCAPE + self.IDENTIFIER_END
    + 338        self._cache: t.Optional[t.Dict[int, str]] = None
    + 339
    + 340    def generate(
    + 341        self,
    + 342        expression: t.Optional[exp.Expression],
    + 343        cache: t.Optional[t.Dict[int, str]] = None,
    + 344    ) -> str:
    + 345        """
    + 346        Generates the SQL string corresponding to the given syntax tree.
    + 347
    + 348        Args:
    + 349            expression: The syntax tree.
    + 350            cache: An optional sql string cache. This leverages the hash of an Expression
    + 351                which can be slow to compute, so only use it if you set _hash on each node.
    + 352
    + 353        Returns:
    + 354            The SQL string corresponding to `expression`.
    + 355        """
    + 356        if cache is not None:
    + 357            self._cache = cache
    + 358
    + 359        self.unsupported_messages = []
    + 360        sql = self.sql(expression).strip()
    + 361        self._cache = None
    + 362
    + 363        if self.unsupported_level == ErrorLevel.IGNORE:
    + 364            return sql
    + 365
    + 366        if self.unsupported_level == ErrorLevel.WARN:
    + 367            for msg in self.unsupported_messages:
    + 368                logger.warning(msg)
    + 369        elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages:
    + 370            raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported))
    + 371
    + 372        if self.pretty:
    + 373            sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n")
    + 374        return sql
    + 375
    + 376    def unsupported(self, message: str) -> None:
    + 377        if self.unsupported_level == ErrorLevel.IMMEDIATE:
    + 378            raise UnsupportedError(message)
    + 379        self.unsupported_messages.append(message)
    + 380
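
generate() buffers everything reported through unsupported() and then, depending on unsupported_level, silently returns, logs warnings, or raises a single UnsupportedError built from at most max_unsupported messages. A sketch of the RAISE behaviour, assuming the default parser maps TO_CHAR with a format argument onto exp.ToChar, which tochar_sql in the first hunk flags as unsupported:

    from sqlglot import parse_one
    from sqlglot.errors import ErrorLevel, UnsupportedError
    from sqlglot.generator import Generator

    generator = Generator(unsupported_level=ErrorLevel.RAISE)
    try:
        generator.generate(parse_one("SELECT TO_CHAR(x, 'yyyy')"))
    except UnsupportedError as error:
        # message comes from tochar_sql: "Format argument unsupported for TO_CHAR/TO_VARCHAR function"
        print(error)
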
    + 381    def sep(self, sep: str = " ") -> str:
    + 382        return f"{sep.strip()}\n" if self.pretty else sep
    + 383
    + 384    def seg(self, sql: str, sep: str = " ") -> str:
    + 385        return f"{self.sep(sep)}{sql}"
    + 386
    + 387    def pad_comment(self, comment: str) -> str:
    + 388        comment = " " + comment if comment[0].strip() else comment
    + 389        comment = comment + " " if comment[-1].strip() else comment
    + 390        return comment
    + 391
    + 392    def maybe_comment(
    + 393        self,
    + 394        sql: str,
    + 395        expression: t.Optional[exp.Expression] = None,
    + 396        comments: t.Optional[t.List[str]] = None,
    + 397    ) -> str:
    + 398        comments = (
    + 399            ((expression and expression.comments) if comments is None else comments)  # type: ignore
    + 400            if self.comments
    + 401            else None
    + 402        )
    + 403
    + 404        if not comments or isinstance(expression, exp.Binary):
    + 405            return sql
    + 406
    + 407        sep = "\n" if self.pretty else " "
    + 408        comments_sql = sep.join(
    + 409            f"/*{self.pad_comment(comment)}*/" for comment in comments if comment
    + 410        )
    + 411
    + 412        if not comments_sql:
    + 413            return sql
    + 414
    + 415        if isinstance(expression, self.WITH_SEPARATED_COMMENTS):
    + 416            return (
    + 417                f"{self.sep()}{comments_sql}{sql}"
    + 418                if sql[0].isspace()
    + 419                else f"{comments_sql}{self.sep()}{sql}"
    + 420            )
      421
    - 422        sep = "\n" if self.pretty else " "
    - 423        comments_sql = sep.join(
    - 424            f"/*{self.pad_comment(comment)}*/" for comment in comments if comment
    - 425        )
    - 426
    - 427        if not comments_sql:
    - 428            return sql
    - 429
    - 430        if isinstance(expression, self.WITH_SEPARATED_COMMENTS):
    - 431            return (
    - 432                f"{self.sep()}{comments_sql}{sql}"
    - 433                if sql[0].isspace()
    - 434                else f"{comments_sql}{self.sep()}{sql}"
    - 435            )
    - 436
    - 437        return f"{sql} {comments_sql}"
    - 438
    - 439    def wrap(self, expression: exp.Expression | str) -> str:
    - 440        this_sql = self.indent(
    - 441            self.sql(expression)
    - 442            if isinstance(expression, (exp.Select, exp.Union))
    - 443            else self.sql(expression, "this"),
    - 444            level=1,
    - 445            pad=0,
    - 446        )
    - 447        return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}"
    - 448
    - 449    def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str:
    - 450        original = self.identify
    - 451        self.identify = False
    - 452        result = func(*args, **kwargs)
    - 453        self.identify = original
    - 454        return result
    - 455
    - 456    def normalize_func(self, name: str) -> str:
    - 457        if self.normalize_functions == "upper":
    - 458            return name.upper()
    - 459        if self.normalize_functions == "lower":
    - 460            return name.lower()
    - 461        return name
    - 462
    - 463    def indent(
    - 464        self,
    - 465        sql: str,
    - 466        level: int = 0,
    - 467        pad: t.Optional[int] = None,
    - 468        skip_first: bool = False,
    - 469        skip_last: bool = False,
    - 470    ) -> str:
    - 471        if not self.pretty:
    - 472            return sql
    - 473
    - 474        pad = self.pad if pad is None else pad
    - 475        lines = sql.split("\n")
    - 476
    - 477        return "\n".join(
    - 478            line
    - 479            if (skip_first and i == 0) or (skip_last and i == len(lines) - 1)
    - 480            else f"{' ' * (level * self._indent + pad)}{line}"
    - 481            for i, line in enumerate(lines)
    - 482        )
    + 422        return f"{sql} {comments_sql}"
    + 423
    + 424    def wrap(self, expression: exp.Expression | str) -> str:
    + 425        this_sql = self.indent(
    + 426            self.sql(expression)
    + 427            if isinstance(expression, (exp.Select, exp.Union))
    + 428            else self.sql(expression, "this"),
    + 429            level=1,
    + 430            pad=0,
    + 431        )
    + 432        return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}"
    + 433
    + 434    def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str:
    + 435        original = self.identify
    + 436        self.identify = False
    + 437        result = func(*args, **kwargs)
    + 438        self.identify = original
    + 439        return result
    + 440
    + 441    def normalize_func(self, name: str) -> str:
    + 442        if self.normalize_functions == "upper" or self.normalize_functions is True:
    + 443            return name.upper()
    + 444        if self.normalize_functions == "lower":
    + 445            return name.lower()
    + 446        return name
    + 447
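
normalize_func now treats True the same as "upper", while False leaves the name as-is, matching the NORMALIZE_FUNCTIONS default documented earlier in this hunk. A small sketch (the output is indicative):

    from sqlglot import parse_one
    from sqlglot.generator import Generator

    tree = parse_one("SELECT SUM(x) FROM t")
    print(Generator(normalize_functions="lower").generate(tree))
    # expected to resemble: SELECT sum(x) FROM t
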
    + 448    def indent(
    + 449        self,
    + 450        sql: str,
    + 451        level: int = 0,
    + 452        pad: t.Optional[int] = None,
    + 453        skip_first: bool = False,
    + 454        skip_last: bool = False,
    + 455    ) -> str:
    + 456        if not self.pretty:
    + 457            return sql
    + 458
    + 459        pad = self.pad if pad is None else pad
    + 460        lines = sql.split("\n")
    + 461
    + 462        return "\n".join(
    + 463            line
    + 464            if (skip_first and i == 0) or (skip_last and i == len(lines) - 1)
    + 465            else f"{' ' * (level * self._indent + pad)}{line}"
    + 466            for i, line in enumerate(lines)
    + 467        )
    + 468
    + 469    def sql(
    + 470        self,
    + 471        expression: t.Optional[str | exp.Expression],
    + 472        key: t.Optional[str] = None,
    + 473        comment: bool = True,
    + 474    ) -> str:
    + 475        if not expression:
    + 476            return ""
    + 477
    + 478        if isinstance(expression, str):
    + 479            return expression
    + 480
    + 481        if key:
    + 482            return self.sql(expression.args.get(key))
      483
    - 484    def sql(
    - 485        self,
    - 486        expression: t.Optional[str | exp.Expression],
    - 487        key: t.Optional[str] = None,
    - 488        comment: bool = True,
    - 489    ) -> str:
    - 490        if not expression:
    - 491            return ""
    - 492
    - 493        if isinstance(expression, str):
    - 494            return expression
    - 495
    - 496        if key:
    - 497            return self.sql(expression.args.get(key))
    + 484        if self._cache is not None:
    + 485            expression_id = hash(expression)
    + 486
    + 487            if expression_id in self._cache:
    + 488                return self._cache[expression_id]
    + 489
    + 490        transform = self.TRANSFORMS.get(expression.__class__)
    + 491
    + 492        if callable(transform):
    + 493            sql = transform(self, expression)
    + 494        elif transform:
    + 495            sql = transform
    + 496        elif isinstance(expression, exp.Expression):
    + 497            exp_handler_name = f"{expression.key}_sql"
      498
    - 499        if self._cache is not None:
    - 500            expression_id = hash(expression)
    - 501
    - 502            if expression_id in self._cache:
    - 503                return self._cache[expression_id]
    - 504
    - 505        transform = self.TRANSFORMS.get(expression.__class__)
    - 506
    - 507        if callable(transform):
    - 508            sql = transform(self, expression)
    - 509        elif transform:
    - 510            sql = transform
    - 511        elif isinstance(expression, exp.Expression):
    - 512            exp_handler_name = f"{expression.key}_sql"
    - 513
    - 514            if hasattr(self, exp_handler_name):
    - 515                sql = getattr(self, exp_handler_name)(expression)
    - 516            elif isinstance(expression, exp.Func):
    - 517                sql = self.function_fallback_sql(expression)
    - 518            elif isinstance(expression, exp.Property):
    - 519                sql = self.property_sql(expression)
    - 520            else:
    - 521                raise ValueError(f"Unsupported expression type {expression.__class__.__name__}")
    - 522        else:
    - 523            raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}")
    - 524
    - 525        sql = self.maybe_comment(sql, expression) if self._comments and comment else sql
    - 526
    - 527        if self._cache is not None:
    - 528            self._cache[expression_id] = sql
    - 529        return sql
    + 499            if hasattr(self, exp_handler_name):
    + 500                sql = getattr(self, exp_handler_name)(expression)
    + 501            elif isinstance(expression, exp.Func):
    + 502                sql = self.function_fallback_sql(expression)
    + 503            elif isinstance(expression, exp.Property):
    + 504                sql = self.property_sql(expression)
    + 505            else:
    + 506                raise ValueError(f"Unsupported expression type {expression.__class__.__name__}")
    + 507        else:
    + 508            raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}")
    + 509
    + 510        sql = self.maybe_comment(sql, expression) if self.comments and comment else sql
    + 511
    + 512        if self._cache is not None:
    + 513            self._cache[expression_id] = sql
    + 514        return sql
    + 515
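
sql() resolves a node by first consulting TRANSFORMS, then looking for a method named f"{expression.key}_sql", and only then falling back to the generic Func/Property handling, so a subclass can support a node simply by defining a method with the right name. A hypothetical sketch building on the oncluster_sql stub added in the first hunk (the ON CLUSTER rendering is illustrative):

    from sqlglot import exp
    from sqlglot.generator import Generator

    class MyClusterGenerator(Generator):
        # exp.OnCluster.key is "oncluster", so this is the method name sql() looks up;
        # the base class in this patch renders the clause as an empty string.
        def oncluster_sql(self, expression: exp.OnCluster) -> str:
            return f"ON CLUSTER {self.sql(expression, 'this')}"
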
    + 516    def uncache_sql(self, expression: exp.Uncache) -> str:
    + 517        table = self.sql(expression, "this")
    + 518        exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
    + 519        return f"UNCACHE TABLE{exists_sql} {table}"
    + 520
    + 521    def cache_sql(self, expression: exp.Cache) -> str:
    + 522        lazy = " LAZY" if expression.args.get("lazy") else ""
    + 523        table = self.sql(expression, "this")
    + 524        options = expression.args.get("options")
    + 525        options = f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})" if options else ""
    + 526        sql = self.sql(expression, "expression")
    + 527        sql = f" AS{self.sep()}{sql}" if sql else ""
    + 528        sql = f"CACHE{lazy} TABLE {table}{options}{sql}"
    + 529        return self.prepend_ctes(expression, sql)
      530
    - 531    def uncache_sql(self, expression: exp.Uncache) -> str:
    - 532        table = self.sql(expression, "this")
    - 533        exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
    - 534        return f"UNCACHE TABLE{exists_sql} {table}"
    - 535
    - 536    def cache_sql(self, expression: exp.Cache) -> str:
    - 537        lazy = " LAZY" if expression.args.get("lazy") else ""
    - 538        table = self.sql(expression, "this")
    - 539        options = expression.args.get("options")
    - 540        options = f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})" if options else ""
    - 541        sql = self.sql(expression, "expression")
    - 542        sql = f" AS{self.sep()}{sql}" if sql else ""
    - 543        sql = f"CACHE{lazy} TABLE {table}{options}{sql}"
    - 544        return self.prepend_ctes(expression, sql)
    - 545
    - 546    def characterset_sql(self, expression: exp.CharacterSet) -> str:
    - 547        if isinstance(expression.parent, exp.Cast):
    - 548            return f"CHAR CHARACTER SET {self.sql(expression, 'this')}"
    - 549        default = "DEFAULT " if expression.args.get("default") else ""
    - 550        return f"{default}CHARACTER SET={self.sql(expression, 'this')}"
    - 551
    - 552    def column_sql(self, expression: exp.Column) -> str:
    - 553        return ".".join(
    - 554            self.sql(part)
    - 555            for part in (
    - 556                expression.args.get("catalog"),
    - 557                expression.args.get("db"),
    - 558                expression.args.get("table"),
    - 559                expression.args.get("this"),
    - 560            )
    - 561            if part
    - 562        )
    - 563
    - 564    def columnposition_sql(self, expression: exp.ColumnPosition) -> str:
    - 565        this = self.sql(expression, "this")
    - 566        this = f" {this}" if this else ""
    - 567        position = self.sql(expression, "position")
    - 568        return f"{position}{this}"
    - 569
    - 570    def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
    - 571        column = self.sql(expression, "this")
    - 572        kind = self.sql(expression, "kind")
    - 573        constraints = self.expressions(expression, key="constraints", sep=" ", flat=True)
    - 574        exists = "IF NOT EXISTS " if expression.args.get("exists") else ""
    - 575        kind = f"{sep}{kind}" if kind else ""
    - 576        constraints = f" {constraints}" if constraints else ""
    - 577        position = self.sql(expression, "position")
    - 578        position = f" {position}" if position else ""
    - 579
    - 580        return f"{exists}{column}{kind}{constraints}{position}"
    - 581
    - 582    def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str:
    - 583        this = self.sql(expression, "this")
    - 584        kind_sql = self.sql(expression, "kind").strip()
    - 585        return f"CONSTRAINT {this} {kind_sql}" if this else kind_sql
    - 586
    - 587    def autoincrementcolumnconstraint_sql(self, _) -> str:
    - 588        return self.token_sql(TokenType.AUTO_INCREMENT)
    - 589
    - 590    def compresscolumnconstraint_sql(self, expression: exp.CompressColumnConstraint) -> str:
    - 591        if isinstance(expression.this, list):
    - 592            this = self.wrap(self.expressions(expression, key="this", flat=True))
    - 593        else:
    - 594            this = self.sql(expression, "this")
    - 595
    - 596        return f"COMPRESS {this}"
    - 597
    - 598    def generatedasidentitycolumnconstraint_sql(
    - 599        self, expression: exp.GeneratedAsIdentityColumnConstraint
    - 600    ) -> str:
    - 601        this = ""
    - 602        if expression.this is not None:
    - 603            on_null = "ON NULL " if expression.args.get("on_null") else ""
    - 604            this = " ALWAYS " if expression.this else f" BY DEFAULT {on_null}"
    + 531    def characterset_sql(self, expression: exp.CharacterSet) -> str:
    + 532        if isinstance(expression.parent, exp.Cast):
    + 533            return f"CHAR CHARACTER SET {self.sql(expression, 'this')}"
    + 534        default = "DEFAULT " if expression.args.get("default") else ""
    + 535        return f"{default}CHARACTER SET={self.sql(expression, 'this')}"
    + 536
    + 537    def column_sql(self, expression: exp.Column) -> str:
    + 538        return ".".join(
    + 539            self.sql(part)
    + 540            for part in (
    + 541                expression.args.get("catalog"),
    + 542                expression.args.get("db"),
    + 543                expression.args.get("table"),
    + 544                expression.args.get("this"),
    + 545            )
    + 546            if part
    + 547        )
    + 548
    + 549    def columnposition_sql(self, expression: exp.ColumnPosition) -> str:
    + 550        this = self.sql(expression, "this")
    + 551        this = f" {this}" if this else ""
    + 552        position = self.sql(expression, "position")
    + 553        return f"{position}{this}"
    + 554
    + 555    def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
    + 556        column = self.sql(expression, "this")
    + 557        kind = self.sql(expression, "kind")
    + 558        constraints = self.expressions(expression, key="constraints", sep=" ", flat=True)
    + 559        exists = "IF NOT EXISTS " if expression.args.get("exists") else ""
    + 560        kind = f"{sep}{kind}" if kind else ""
    + 561        constraints = f" {constraints}" if constraints else ""
    + 562        position = self.sql(expression, "position")
    + 563        position = f" {position}" if position else ""
    + 564
    + 565        return f"{exists}{column}{kind}{constraints}{position}"
    + 566
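For context on how columndef_sql and the column-constraint helpers below surface through the public API, a minimal sketch (assuming sqlglot 16.x defaults; the exact output depends on the write dialect):

import sqlglot

# Column definitions and their constraints round-trip through
# columndef_sql / columnconstraint_sql and friends.
print(sqlglot.parse_one("CREATE TABLE t (id INT NOT NULL PRIMARY KEY, name TEXT)").sql())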
    + 567    def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str:
    + 568        this = self.sql(expression, "this")
    + 569        kind_sql = self.sql(expression, "kind").strip()
    + 570        return f"CONSTRAINT {this} {kind_sql}" if this else kind_sql
    + 571
    + 572    def autoincrementcolumnconstraint_sql(self, _) -> str:
    + 573        return self.token_sql(TokenType.AUTO_INCREMENT)
    + 574
    + 575    def compresscolumnconstraint_sql(self, expression: exp.CompressColumnConstraint) -> str:
    + 576        if isinstance(expression.this, list):
    + 577            this = self.wrap(self.expressions(expression, key="this", flat=True))
    + 578        else:
    + 579            this = self.sql(expression, "this")
    + 580
    + 581        return f"COMPRESS {this}"
    + 582
    + 583    def generatedasidentitycolumnconstraint_sql(
    + 584        self, expression: exp.GeneratedAsIdentityColumnConstraint
    + 585    ) -> str:
    + 586        this = ""
    + 587        if expression.this is not None:
    + 588            on_null = "ON NULL " if expression.args.get("on_null") else ""
    + 589            this = " ALWAYS " if expression.this else f" BY DEFAULT {on_null}"
    + 590
    + 591        start = expression.args.get("start")
    + 592        start = f"START WITH {start}" if start else ""
    + 593        increment = expression.args.get("increment")
    + 594        increment = f" INCREMENT BY {increment}" if increment else ""
    + 595        minvalue = expression.args.get("minvalue")
    + 596        minvalue = f" MINVALUE {minvalue}" if minvalue else ""
    + 597        maxvalue = expression.args.get("maxvalue")
    + 598        maxvalue = f" MAXVALUE {maxvalue}" if maxvalue else ""
    + 599        cycle = expression.args.get("cycle")
    + 600        cycle_sql = ""
    + 601
    + 602        if cycle is not None:
    + 603            cycle_sql = f"{' NO' if not cycle else ''} CYCLE"
    + 604            cycle_sql = cycle_sql.strip() if not start and not increment else cycle_sql
      605
    - 606        start = expression.args.get("start")
    - 607        start = f"START WITH {start}" if start else ""
    - 608        increment = expression.args.get("increment")
    - 609        increment = f" INCREMENT BY {increment}" if increment else ""
    - 610        minvalue = expression.args.get("minvalue")
    - 611        minvalue = f" MINVALUE {minvalue}" if minvalue else ""
    - 612        maxvalue = expression.args.get("maxvalue")
    - 613        maxvalue = f" MAXVALUE {maxvalue}" if maxvalue else ""
    - 614        cycle = expression.args.get("cycle")
    - 615        cycle_sql = ""
    - 616
    - 617        if cycle is not None:
    - 618            cycle_sql = f"{' NO' if not cycle else ''} CYCLE"
    - 619            cycle_sql = cycle_sql.strip() if not start and not increment else cycle_sql
    - 620
    - 621        sequence_opts = ""
    - 622        if start or increment or cycle_sql:
    - 623            sequence_opts = f"{start}{increment}{minvalue}{maxvalue}{cycle_sql}"
    - 624            sequence_opts = f" ({sequence_opts.strip()})"
    - 625
    - 626        expr = self.sql(expression, "expression")
    - 627        expr = f"({expr})" if expr else "IDENTITY"
    - 628
    - 629        return f"GENERATED{this}AS {expr}{sequence_opts}"
    - 630
    - 631    def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str:
    - 632        return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL"
    - 633
    - 634    def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str:
    - 635        desc = expression.args.get("desc")
    - 636        if desc is not None:
    - 637            return f"PRIMARY KEY{' DESC' if desc else ' ASC'}"
    - 638        return f"PRIMARY KEY"
    + 606        sequence_opts = ""
    + 607        if start or increment or cycle_sql:
    + 608            sequence_opts = f"{start}{increment}{minvalue}{maxvalue}{cycle_sql}"
    + 609            sequence_opts = f" ({sequence_opts.strip()})"
    + 610
    + 611        expr = self.sql(expression, "expression")
    + 612        expr = f"({expr})" if expr else "IDENTITY"
    + 613
    + 614        return f"GENERATED{this}AS {expr}{sequence_opts}"
    + 615
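The identity-constraint generator above assembles the optional sequence options (START WITH, INCREMENT BY, MINVALUE, MAXVALUE, CYCLE). A hedged example, assuming the default dialect accepts the ANSI identity syntax:

import sqlglot

sql = "CREATE TABLE t (id INT GENERATED BY DEFAULT AS IDENTITY (START WITH 1 INCREMENT BY 2))"
# The sequence options are re-emitted by generatedasidentitycolumnconstraint_sql.
print(sqlglot.parse_one(sql).sql())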
    + 616    def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str:
    + 617        return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL"
    + 618
    + 619    def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str:
    + 620        desc = expression.args.get("desc")
    + 621        if desc is not None:
    + 622            return f"PRIMARY KEY{' DESC' if desc else ' ASC'}"
    + 623        return f"PRIMARY KEY"
    + 624
    + 625    def uniquecolumnconstraint_sql(self, expression: exp.UniqueColumnConstraint) -> str:
    + 626        this = self.sql(expression, "this")
    + 627        this = f" {this}" if this else ""
    + 628        return f"UNIQUE{this}"
    + 629
    + 630    def createable_sql(
    + 631        self, expression: exp.Create, locations: dict[exp.Properties.Location, list[exp.Property]]
    + 632    ) -> str:
    + 633        return self.sql(expression, "this")
    + 634
    + 635    def create_sql(self, expression: exp.Create) -> str:
    + 636        kind = self.sql(expression, "kind").upper()
    + 637        properties = expression.args.get("properties")
    + 638        properties_locs = self.locate_properties(properties) if properties else {}
      639
    - 640    def uniquecolumnconstraint_sql(self, expression: exp.UniqueColumnConstraint) -> str:
    - 641        this = self.sql(expression, "this")
    - 642        this = f" {this}" if this else ""
    - 643        return f"UNIQUE{this}"
    - 644
    - 645    def create_sql(self, expression: exp.Create) -> str:
    - 646        kind = self.sql(expression, "kind").upper()
    - 647        properties = expression.args.get("properties")
    - 648        properties_exp = expression.copy()
    - 649        properties_locs = self.locate_properties(properties) if properties else {}
    - 650        if properties_locs.get(exp.Properties.Location.POST_SCHEMA) or properties_locs.get(
    - 651            exp.Properties.Location.POST_WITH
    - 652        ):
    - 653            properties_exp.set(
    - 654                "properties",
    - 655                exp.Properties(
    - 656                    expressions=[
    - 657                        *properties_locs[exp.Properties.Location.POST_SCHEMA],
    - 658                        *properties_locs[exp.Properties.Location.POST_WITH],
    - 659                    ]
    - 660                ),
    - 661            )
    - 662        if kind == "TABLE" and properties_locs.get(exp.Properties.Location.POST_NAME):
    - 663            this_name = self.sql(expression.this, "this")
    - 664            this_properties = self.properties(
    - 665                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_NAME]),
    - 666                wrapped=False,
    - 667            )
    - 668            this_schema = f"({self.expressions(expression.this)})"
    - 669            this = f"{this_name}, {this_properties} {this_schema}"
    - 670            properties_sql = ""
    - 671        else:
    - 672            this = self.sql(expression, "this")
    - 673            properties_sql = self.sql(properties_exp, "properties")
    - 674        begin = " BEGIN" if expression.args.get("begin") else ""
    - 675        expression_sql = self.sql(expression, "expression")
    - 676        if expression_sql:
    - 677            expression_sql = f"{begin}{self.sep()}{expression_sql}"
    - 678
    - 679            if self.CREATE_FUNCTION_RETURN_AS or not isinstance(expression.expression, exp.Return):
    - 680                if properties_locs.get(exp.Properties.Location.POST_ALIAS):
    - 681                    postalias_props_sql = self.properties(
    - 682                        exp.Properties(
    - 683                            expressions=properties_locs[exp.Properties.Location.POST_ALIAS]
    - 684                        ),
    - 685                        wrapped=False,
    - 686                    )
    - 687                    expression_sql = f" AS {postalias_props_sql}{expression_sql}"
    - 688                else:
    - 689                    expression_sql = f" AS{expression_sql}"
    - 690
    - 691        postindex_props_sql = ""
    - 692        if properties_locs.get(exp.Properties.Location.POST_INDEX):
    - 693            postindex_props_sql = self.properties(
    - 694                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_INDEX]),
    - 695                wrapped=False,
    - 696                prefix=" ",
    - 697            )
    - 698
    - 699        indexes = self.expressions(expression, key="indexes", indent=False, sep=" ")
    - 700        indexes = f" {indexes}" if indexes else ""
    - 701        index_sql = indexes + postindex_props_sql
    - 702
    - 703        replace = " OR REPLACE" if expression.args.get("replace") else ""
    - 704        unique = " UNIQUE" if expression.args.get("unique") else ""
    - 705
    - 706        postcreate_props_sql = ""
    - 707        if properties_locs.get(exp.Properties.Location.POST_CREATE):
    - 708            postcreate_props_sql = self.properties(
    - 709                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]),
    - 710                sep=" ",
    - 711                prefix=" ",
    - 712                wrapped=False,
    - 713            )
    - 714
    - 715        modifiers = "".join((replace, unique, postcreate_props_sql))
    + 640        this = self.createable_sql(expression, properties_locs)
    + 641
    + 642        properties_sql = ""
    + 643        if properties_locs.get(exp.Properties.Location.POST_SCHEMA) or properties_locs.get(
    + 644            exp.Properties.Location.POST_WITH
    + 645        ):
    + 646            properties_sql = self.sql(
    + 647                exp.Properties(
    + 648                    expressions=[
    + 649                        *properties_locs[exp.Properties.Location.POST_SCHEMA],
    + 650                        *properties_locs[exp.Properties.Location.POST_WITH],
    + 651                    ]
    + 652                )
    + 653            )
    + 654
    + 655        begin = " BEGIN" if expression.args.get("begin") else ""
    + 656        expression_sql = self.sql(expression, "expression")
    + 657        if expression_sql:
    + 658            expression_sql = f"{begin}{self.sep()}{expression_sql}"
    + 659
    + 660            if self.CREATE_FUNCTION_RETURN_AS or not isinstance(expression.expression, exp.Return):
    + 661                if properties_locs.get(exp.Properties.Location.POST_ALIAS):
    + 662                    postalias_props_sql = self.properties(
    + 663                        exp.Properties(
    + 664                            expressions=properties_locs[exp.Properties.Location.POST_ALIAS]
    + 665                        ),
    + 666                        wrapped=False,
    + 667                    )
    + 668                    expression_sql = f" AS {postalias_props_sql}{expression_sql}"
    + 669                else:
    + 670                    expression_sql = f" AS{expression_sql}"
    + 671
    + 672        postindex_props_sql = ""
    + 673        if properties_locs.get(exp.Properties.Location.POST_INDEX):
    + 674            postindex_props_sql = self.properties(
    + 675                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_INDEX]),
    + 676                wrapped=False,
    + 677                prefix=" ",
    + 678            )
    + 679
    + 680        indexes = self.expressions(expression, key="indexes", indent=False, sep=" ")
    + 681        indexes = f" {indexes}" if indexes else ""
    + 682        index_sql = indexes + postindex_props_sql
    + 683
    + 684        replace = " OR REPLACE" if expression.args.get("replace") else ""
    + 685        unique = " UNIQUE" if expression.args.get("unique") else ""
    + 686
    + 687        postcreate_props_sql = ""
    + 688        if properties_locs.get(exp.Properties.Location.POST_CREATE):
    + 689            postcreate_props_sql = self.properties(
    + 690                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]),
    + 691                sep=" ",
    + 692                prefix=" ",
    + 693                wrapped=False,
    + 694            )
    + 695
    + 696        modifiers = "".join((replace, unique, postcreate_props_sql))
    + 697
    + 698        postexpression_props_sql = ""
    + 699        if properties_locs.get(exp.Properties.Location.POST_EXPRESSION):
    + 700            postexpression_props_sql = self.properties(
    + 701                exp.Properties(
    + 702                    expressions=properties_locs[exp.Properties.Location.POST_EXPRESSION]
    + 703                ),
    + 704                sep=" ",
    + 705                prefix=" ",
    + 706                wrapped=False,
    + 707            )
    + 708
    + 709        exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else ""
    + 710        no_schema_binding = (
    + 711            " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else ""
    + 712        )
    + 713
    + 714        clone = self.sql(expression, "clone")
    + 715        clone = f" {clone}" if clone else ""
      716
    - 717        postexpression_props_sql = ""
    - 718        if properties_locs.get(exp.Properties.Location.POST_EXPRESSION):
    - 719            postexpression_props_sql = self.properties(
    - 720                exp.Properties(
    - 721                    expressions=properties_locs[exp.Properties.Location.POST_EXPRESSION]
    - 722                ),
    - 723                sep=" ",
    - 724                prefix=" ",
    - 725                wrapped=False,
    - 726            )
    - 727
    - 728        exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else ""
    - 729        no_schema_binding = (
    - 730            " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else ""
    - 731        )
    - 732
    - 733        clone = self.sql(expression, "clone")
    - 734        clone = f" {clone}" if clone else ""
    - 735
    - 736        expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{postexpression_props_sql}{index_sql}{no_schema_binding}{clone}"
    - 737        return self.prepend_ctes(expression, expression_sql)
    - 738
    - 739    def clone_sql(self, expression: exp.Clone) -> str:
    - 740        this = self.sql(expression, "this")
    - 741        when = self.sql(expression, "when")
    - 742
    - 743        if when:
    - 744            kind = self.sql(expression, "kind")
    - 745            expr = self.sql(expression, "expression")
    - 746            return f"CLONE {this} {when} ({kind} => {expr})"
    - 747
    - 748        return f"CLONE {this}"
    + 717        expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{postexpression_props_sql}{index_sql}{no_schema_binding}{clone}"
    + 718        return self.prepend_ctes(expression, expression_sql)
    + 719
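The new createable_sql hook factors the rendering of the created object out of create_sql, so a dialect can override that one piece without copying the whole method. A hypothetical override (MyGenerator is illustrative, not part of sqlglot):

from sqlglot import parse_one
from sqlglot.generator import Generator

class MyGenerator(Generator):
    # Hypothetical: upper-case the rendered target (name and schema),
    # leaving the rest of the CREATE statement to the base create_sql.
    def createable_sql(self, expression, locations):
        return super().createable_sql(expression, locations).upper()

print(MyGenerator().generate(parse_one("CREATE TABLE t (a INT)")))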
    + 720    def clone_sql(self, expression: exp.Clone) -> str:
    + 721        this = self.sql(expression, "this")
    + 722        when = self.sql(expression, "when")
    + 723
    + 724        if when:
    + 725            kind = self.sql(expression, "kind")
    + 726            expr = self.sql(expression, "expression")
    + 727            return f"CLONE {this} {when} ({kind} => {expr})"
    + 728
    + 729        return f"CLONE {this}"
    + 730
    + 731    def describe_sql(self, expression: exp.Describe) -> str:
    + 732        return f"DESCRIBE {self.sql(expression, 'this')}"
    + 733
    + 734    def prepend_ctes(self, expression: exp.Expression, sql: str) -> str:
    + 735        with_ = self.sql(expression, "with")
    + 736        if with_:
    + 737            sql = f"{with_}{self.sep()}{sql}"
    + 738        return sql
    + 739
    + 740    def with_sql(self, expression: exp.With) -> str:
    + 741        sql = self.expressions(expression, flat=True)
    + 742        recursive = "RECURSIVE " if expression.args.get("recursive") else ""
    + 743
    + 744        return f"WITH {recursive}{sql}"
    + 745
    + 746    def cte_sql(self, expression: exp.CTE) -> str:
    + 747        alias = self.sql(expression, "alias")
    + 748        return f"{alias} AS {self.wrap(expression)}"
      749
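prepend_ctes, with_sql and cte_sql are what attach a WITH clause to the statement they wrap, for example:

import sqlglot

# The CTE is rendered by cte_sql and prepended to the SELECT by prepend_ctes.
print(sqlglot.parse_one("WITH x AS (SELECT 1 AS a) SELECT a FROM x").sql())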
    - 750    def describe_sql(self, expression: exp.Describe) -> str:
    - 751        return f"DESCRIBE {self.sql(expression, 'this')}"
    - 752
    - 753    def prepend_ctes(self, expression: exp.Expression, sql: str) -> str:
    - 754        with_ = self.sql(expression, "with")
    - 755        if with_:
    - 756            sql = f"{with_}{self.sep()}{sql}"
    - 757        return sql
    - 758
    - 759    def with_sql(self, expression: exp.With) -> str:
    - 760        sql = self.expressions(expression, flat=True)
    - 761        recursive = "RECURSIVE " if expression.args.get("recursive") else ""
    - 762
    - 763        return f"WITH {recursive}{sql}"
    - 764
    - 765    def cte_sql(self, expression: exp.CTE) -> str:
    - 766        alias = self.sql(expression, "alias")
    - 767        return f"{alias} AS {self.wrap(expression)}"
    - 768
    - 769    def tablealias_sql(self, expression: exp.TableAlias) -> str:
    - 770        alias = self.sql(expression, "this")
    - 771        columns = self.expressions(expression, key="columns", flat=True)
    - 772        columns = f"({columns})" if columns else ""
    - 773        return f"{alias}{columns}"
    - 774
    - 775    def bitstring_sql(self, expression: exp.BitString) -> str:
    - 776        this = self.sql(expression, "this")
    - 777        if self.bit_start:
    - 778            return f"{self.bit_start}{this}{self.bit_end}"
    - 779        return f"{int(this, 2)}"
    - 780
    - 781    def hexstring_sql(self, expression: exp.HexString) -> str:
    - 782        this = self.sql(expression, "this")
    - 783        if self.hex_start:
    - 784            return f"{self.hex_start}{this}{self.hex_end}"
    - 785        return f"{int(this, 16)}"
    - 786
    - 787    def bytestring_sql(self, expression: exp.ByteString) -> str:
    - 788        this = self.sql(expression, "this")
    - 789        if self.byte_start:
    - 790            return f"{self.byte_start}{this}{self.byte_end}"
    - 791        return this
    - 792
    - 793    def rawstring_sql(self, expression: exp.RawString) -> str:
    - 794        if self.raw_start:
    - 795            return f"{self.raw_start}{expression.name}{self.raw_end}"
    - 796        return self.sql(exp.Literal.string(expression.name.replace("\\", "\\\\")))
    - 797
    - 798    def datatypesize_sql(self, expression: exp.DataTypeSize) -> str:
    - 799        this = self.sql(expression, "this")
    - 800        specifier = self.sql(expression, "expression")
    - 801        specifier = f" {specifier}" if specifier else ""
    - 802        return f"{this}{specifier}"
    - 803
    - 804    def datatype_sql(self, expression: exp.DataType) -> str:
    - 805        type_value = expression.this
    - 806        type_sql = self.TYPE_MAPPING.get(type_value, type_value.value)
    - 807        nested = ""
    - 808        interior = self.expressions(expression, flat=True)
    - 809        values = ""
    - 810        if interior:
    - 811            if expression.args.get("nested"):
    - 812                nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}"
    - 813                if expression.args.get("values") is not None:
    - 814                    delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")")
    - 815                    values = self.expressions(expression, key="values", flat=True)
    - 816                    values = f"{delimiters[0]}{values}{delimiters[1]}"
    - 817            else:
    - 818                nested = f"({interior})"
    - 819
    - 820        return f"{type_sql}{nested}{values}"
    + 750    def tablealias_sql(self, expression: exp.TableAlias) -> str:
    + 751        alias = self.sql(expression, "this")
    + 752        columns = self.expressions(expression, key="columns", flat=True)
    + 753        columns = f"({columns})" if columns else ""
    + 754        return f"{alias}{columns}"
    + 755
    + 756    def bitstring_sql(self, expression: exp.BitString) -> str:
    + 757        this = self.sql(expression, "this")
    + 758        if self.BIT_START:
    + 759            return f"{self.BIT_START}{this}{self.BIT_END}"
    + 760        return f"{int(this, 2)}"
    + 761
    + 762    def hexstring_sql(self, expression: exp.HexString) -> str:
    + 763        this = self.sql(expression, "this")
    + 764        if self.HEX_START:
    + 765            return f"{self.HEX_START}{this}{self.HEX_END}"
    + 766        return f"{int(this, 16)}"
    + 767
    + 768    def bytestring_sql(self, expression: exp.ByteString) -> str:
    + 769        this = self.sql(expression, "this")
    + 770        if self.BYTE_START:
    + 771            return f"{self.BYTE_START}{this}{self.BYTE_END}"
    + 772        return this
    + 773
    + 774    def rawstring_sql(self, expression: exp.RawString) -> str:
    + 775        if self.RAW_START:
    + 776            return f"{self.RAW_START}{expression.name}{self.RAW_END}"
    + 777        return self.sql(exp.Literal.string(expression.name.replace("\\", "\\\\")))
    + 778
    + 779    def datatypesize_sql(self, expression: exp.DataTypeSize) -> str:
    + 780        this = self.sql(expression, "this")
    + 781        specifier = self.sql(expression, "expression")
    + 782        specifier = f" {specifier}" if specifier else ""
    + 783        return f"{this}{specifier}"
    + 784
    + 785    def datatype_sql(self, expression: exp.DataType) -> str:
    + 786        type_value = expression.this
    + 787        type_sql = self.TYPE_MAPPING.get(type_value, type_value.value)
    + 788        nested = ""
    + 789        interior = self.expressions(expression, flat=True)
    + 790        values = ""
    + 791        if interior:
    + 792            if expression.args.get("nested"):
    + 793                nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}"
    + 794                if expression.args.get("values") is not None:
    + 795                    delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")")
    + 796                    values = self.expressions(expression, key="values", flat=True)
    + 797                    values = f"{delimiters[0]}{values}{delimiters[1]}"
    + 798            else:
    + 799                nested = f"({interior})"
    + 800
    + 801        return f"{type_sql}{nested}{values}"
    + 802
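datatype_sql delegates the nesting delimiters to STRUCT_DELIMITER, which is why the same nested type renders differently per dialect. A hedged example (the exact type names depend on each dialect's TYPE_MAPPING):

import sqlglot

# Presto spells nested types with parentheses, Hive with angle brackets.
print(sqlglot.transpile("SELECT CAST(a AS MAP(VARCHAR, ARRAY(INT)))", read="presto", write="hive")[0])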
    + 803    def directory_sql(self, expression: exp.Directory) -> str:
    + 804        local = "LOCAL " if expression.args.get("local") else ""
    + 805        row_format = self.sql(expression, "row_format")
    + 806        row_format = f" {row_format}" if row_format else ""
    + 807        return f"{local}DIRECTORY {self.sql(expression, 'this')}{row_format}"
    + 808
    + 809    def delete_sql(self, expression: exp.Delete) -> str:
    + 810        this = self.sql(expression, "this")
    + 811        this = f" FROM {this}" if this else ""
    + 812        using_sql = (
    + 813            f" USING {self.expressions(expression, key='using', sep=', USING ')}"
    + 814            if expression.args.get("using")
    + 815            else ""
    + 816        )
    + 817        where_sql = self.sql(expression, "where")
    + 818        returning = self.sql(expression, "returning")
    + 819        sql = f"DELETE{this}{using_sql}{where_sql}{returning}"
    + 820        return self.prepend_ctes(expression, sql)
      821
    - 822    def directory_sql(self, expression: exp.Directory) -> str:
    - 823        local = "LOCAL " if expression.args.get("local") else ""
    - 824        row_format = self.sql(expression, "row_format")
    - 825        row_format = f" {row_format}" if row_format else ""
    - 826        return f"{local}DIRECTORY {self.sql(expression, 'this')}{row_format}"
    - 827
    - 828    def delete_sql(self, expression: exp.Delete) -> str:
    - 829        this = self.sql(expression, "this")
    - 830        this = f" FROM {this}" if this else ""
    - 831        using_sql = (
    - 832            f" USING {self.expressions(expression, key='using', sep=', USING ')}"
    - 833            if expression.args.get("using")
    - 834            else ""
    - 835        )
    - 836        where_sql = self.sql(expression, "where")
    - 837        returning = self.sql(expression, "returning")
    - 838        sql = f"DELETE{this}{using_sql}{where_sql}{returning}"
    - 839        return self.prepend_ctes(expression, sql)
    + 822    def drop_sql(self, expression: exp.Drop) -> str:
    + 823        this = self.sql(expression, "this")
    + 824        kind = expression.args["kind"]
    + 825        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
    + 826        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
    + 827        materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
    + 828        cascade = " CASCADE" if expression.args.get("cascade") else ""
    + 829        constraints = " CONSTRAINTS" if expression.args.get("constraints") else ""
    + 830        purge = " PURGE" if expression.args.get("purge") else ""
    + 831        return (
    + 832            f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{cascade}{constraints}{purge}"
    + 833        )
    + 834
    + 835    def except_sql(self, expression: exp.Except) -> str:
    + 836        return self.prepend_ctes(
    + 837            expression,
    + 838            self.set_operation(expression, self.except_op(expression)),
    + 839        )
      840
    - 841    def drop_sql(self, expression: exp.Drop) -> str:
    - 842        this = self.sql(expression, "this")
    - 843        kind = expression.args["kind"]
    - 844        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
    - 845        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
    - 846        materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
    - 847        cascade = " CASCADE" if expression.args.get("cascade") else ""
    - 848        constraints = " CONSTRAINTS" if expression.args.get("constraints") else ""
    - 849        purge = " PURGE" if expression.args.get("purge") else ""
    - 850        return (
    - 851            f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{cascade}{constraints}{purge}"
    - 852        )
    + 841    def except_op(self, expression: exp.Except) -> str:
    + 842        return f"EXCEPT{'' if expression.args.get('distinct') else ' ALL'}"
    + 843
    + 844    def fetch_sql(self, expression: exp.Fetch) -> str:
    + 845        direction = expression.args.get("direction")
    + 846        direction = f" {direction.upper()}" if direction else ""
    + 847        count = expression.args.get("count")
    + 848        count = f" {count}" if count else ""
    + 849        if expression.args.get("percent"):
    + 850            count = f"{count} PERCENT"
    + 851        with_ties_or_only = "WITH TIES" if expression.args.get("with_ties") else "ONLY"
    + 852        return f"{self.seg('FETCH')}{direction}{count} ROWS {with_ties_or_only}"
      853
    - 854    def except_sql(self, expression: exp.Except) -> str:
    - 855        return self.prepend_ctes(
    - 856            expression,
    - 857            self.set_operation(expression, self.except_op(expression)),
    - 858        )
    - 859
    - 860    def except_op(self, expression: exp.Except) -> str:
    - 861        return f"EXCEPT{'' if expression.args.get('distinct') else ' ALL'}"
    - 862
    - 863    def fetch_sql(self, expression: exp.Fetch) -> str:
    - 864        direction = expression.args.get("direction")
    - 865        direction = f" {direction.upper()}" if direction else ""
    - 866        count = expression.args.get("count")
    - 867        count = f" {count}" if count else ""
    - 868        if expression.args.get("percent"):
    - 869            count = f"{count} PERCENT"
    - 870        with_ties_or_only = "WITH TIES" if expression.args.get("with_ties") else "ONLY"
    - 871        return f"{self.seg('FETCH')}{direction}{count} ROWS {with_ties_or_only}"
    - 872
    - 873    def filter_sql(self, expression: exp.Filter) -> str:
    - 874        this = self.sql(expression, "this")
    - 875        where = self.sql(expression, "expression")[1:]  # where has a leading space
    - 876        return f"{this} FILTER({where})"
    - 877
    - 878    def hint_sql(self, expression: exp.Hint) -> str:
    - 879        if self.sql(expression, "this"):
    - 880            self.unsupported("Hints are not supported")
    - 881        return ""
    - 882
    - 883    def index_sql(self, expression: exp.Index) -> str:
    - 884        unique = "UNIQUE " if expression.args.get("unique") else ""
    - 885        primary = "PRIMARY " if expression.args.get("primary") else ""
    - 886        amp = "AMP " if expression.args.get("amp") else ""
    - 887        name = f"{expression.name} " if expression.name else ""
    - 888        table = self.sql(expression, "table")
    - 889        table = f"{self.INDEX_ON} {table} " if table else ""
    - 890        index = "INDEX " if not table else ""
    - 891        columns = self.expressions(expression, key="columns", flat=True)
    - 892        partition_by = self.expressions(expression, key="partition_by", flat=True)
    - 893        partition_by = f" PARTITION BY {partition_by}" if partition_by else ""
    - 894        return f"{unique}{primary}{amp}{index}{name}{table}({columns}){partition_by}"
    - 895
    - 896    def identifier_sql(self, expression: exp.Identifier) -> str:
    - 897        text = expression.name
    - 898        lower = text.lower()
    - 899        text = lower if self.normalize and not expression.quoted else text
    - 900        text = text.replace(self.identifier_end, self._escaped_identifier_end)
    - 901        if (
    - 902            expression.quoted
    - 903            or should_identify(text, self.identify)
    - 904            or lower in self.RESERVED_KEYWORDS
    - 905            or (not self.identifiers_can_start_with_digit and text[:1].isdigit())
    - 906        ):
    - 907            text = f"{self.identifier_start}{text}{self.identifier_end}"
    - 908        return text
    - 909
    - 910    def inputoutputformat_sql(self, expression: exp.InputOutputFormat) -> str:
    - 911        input_format = self.sql(expression, "input_format")
    - 912        input_format = f"INPUTFORMAT {input_format}" if input_format else ""
    - 913        output_format = self.sql(expression, "output_format")
    - 914        output_format = f"OUTPUTFORMAT {output_format}" if output_format else ""
    - 915        return self.sep().join((input_format, output_format))
    - 916
    - 917    def national_sql(self, expression: exp.National, prefix: str = "N") -> str:
    - 918        string = self.sql(exp.Literal.string(expression.name))
    - 919        return f"{prefix}{string}"
    - 920
    - 921    def partition_sql(self, expression: exp.Partition) -> str:
    - 922        return f"PARTITION({self.expressions(expression)})"
    - 923
    - 924    def properties_sql(self, expression: exp.Properties) -> str:
    - 925        root_properties = []
    - 926        with_properties = []
    + 854    def filter_sql(self, expression: exp.Filter) -> str:
    + 855        this = self.sql(expression, "this")
    + 856        where = self.sql(expression, "expression")[1:]  # where has a leading space
    + 857        return f"{this} FILTER({where})"
    + 858
    + 859    def hint_sql(self, expression: exp.Hint) -> str:
    + 860        if self.sql(expression, "this"):
    + 861            self.unsupported("Hints are not supported")
    + 862        return ""
    + 863
    + 864    def index_sql(self, expression: exp.Index) -> str:
    + 865        unique = "UNIQUE " if expression.args.get("unique") else ""
    + 866        primary = "PRIMARY " if expression.args.get("primary") else ""
    + 867        amp = "AMP " if expression.args.get("amp") else ""
    + 868        name = f"{expression.name} " if expression.name else ""
    + 869        table = self.sql(expression, "table")
    + 870        table = f"{self.INDEX_ON} {table} " if table else ""
    + 871        using = self.sql(expression, "using")
    + 872        using = f"USING {using} " if using else ""
    + 873        index = "INDEX " if not table else ""
    + 874        columns = self.expressions(expression, key="columns", flat=True)
    + 875        columns = f"({columns})" if columns else ""
    + 876        partition_by = self.expressions(expression, key="partition_by", flat=True)
    + 877        partition_by = f" PARTITION BY {partition_by}" if partition_by else ""
    + 878        return f"{unique}{primary}{amp}{index}{name}{table}{using}{columns}{partition_by}"
    + 879
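index_sql now also emits the optional USING clause and only parenthesizes the column list when columns are present. A small example round-tripped through the Postgres dialect (output printed, not asserted):

import sqlglot

# UNIQUE, the index name, the target table and the column list are assembled by index_sql.
print(sqlglot.transpile("CREATE UNIQUE INDEX idx ON t (a, b)", read="postgres")[0])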
    + 880    def identifier_sql(self, expression: exp.Identifier) -> str:
    + 881        text = expression.name
    + 882        lower = text.lower()
    + 883        text = lower if self.normalize and not expression.quoted else text
    + 884        text = text.replace(self.IDENTIFIER_END, self._escaped_identifier_end)
    + 885        if (
    + 886            expression.quoted
    + 887            or should_identify(text, self.identify)
    + 888            or lower in self.RESERVED_KEYWORDS
    + 889            or (not self.IDENTIFIERS_CAN_START_WITH_DIGIT and text[:1].isdigit())
    + 890        ):
    + 891            text = f"{self.IDENTIFIER_START}{text}{self.IDENTIFIER_END}"
    + 892        return text
    + 893
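identifier_sql is driven by the identify and normalize generator options plus the dialect's IDENTIFIER_START/IDENTIFIER_END constants, for example:

import sqlglot

# identify=True forces quoting; normalize=True lower-cases unquoted identifiers.
print(sqlglot.parse_one("SELECT Col FROM Tbl").sql(identify=True))
print(sqlglot.parse_one("SELECT Col FROM Tbl").sql(normalize=True))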
    + 894    def inputoutputformat_sql(self, expression: exp.InputOutputFormat) -> str:
    + 895        input_format = self.sql(expression, "input_format")
    + 896        input_format = f"INPUTFORMAT {input_format}" if input_format else ""
    + 897        output_format = self.sql(expression, "output_format")
    + 898        output_format = f"OUTPUTFORMAT {output_format}" if output_format else ""
    + 899        return self.sep().join((input_format, output_format))
    + 900
    + 901    def national_sql(self, expression: exp.National, prefix: str = "N") -> str:
    + 902        string = self.sql(exp.Literal.string(expression.name))
    + 903        return f"{prefix}{string}"
    + 904
    + 905    def partition_sql(self, expression: exp.Partition) -> str:
    + 906        return f"PARTITION({self.expressions(expression)})"
    + 907
    + 908    def properties_sql(self, expression: exp.Properties) -> str:
    + 909        root_properties = []
    + 910        with_properties = []
    + 911
    + 912        for p in expression.expressions:
    + 913            p_loc = self.PROPERTIES_LOCATION[p.__class__]
    + 914            if p_loc == exp.Properties.Location.POST_WITH:
    + 915                with_properties.append(p)
    + 916            elif p_loc == exp.Properties.Location.POST_SCHEMA:
    + 917                root_properties.append(p)
    + 918
    + 919        return self.root_properties(
    + 920            exp.Properties(expressions=root_properties)
    + 921        ) + self.with_properties(exp.Properties(expressions=with_properties))
    + 922
    + 923    def root_properties(self, properties: exp.Properties) -> str:
    + 924        if properties.expressions:
    + 925            return self.sep() + self.expressions(properties, indent=False, sep=" ")
    + 926        return ""
      927
    - 928        for p in expression.expressions:
    - 929            p_loc = self.PROPERTIES_LOCATION[p.__class__]
    - 930            if p_loc == exp.Properties.Location.POST_WITH:
    - 931                with_properties.append(p)
    - 932            elif p_loc == exp.Properties.Location.POST_SCHEMA:
    - 933                root_properties.append(p)
    - 934
    - 935        return self.root_properties(
    - 936            exp.Properties(expressions=root_properties)
    - 937        ) + self.with_properties(exp.Properties(expressions=with_properties))
    - 938
    - 939    def root_properties(self, properties: exp.Properties) -> str:
    - 940        if properties.expressions:
    - 941            return self.sep() + self.expressions(properties, indent=False, sep=" ")
    - 942        return ""
    - 943
    - 944    def properties(
    - 945        self,
    - 946        properties: exp.Properties,
    - 947        prefix: str = "",
    - 948        sep: str = ", ",
    - 949        suffix: str = "",
    - 950        wrapped: bool = True,
    - 951    ) -> str:
    - 952        if properties.expressions:
    - 953            expressions = self.expressions(properties, sep=sep, indent=False)
    - 954            expressions = self.wrap(expressions) if wrapped else expressions
    - 955            return f"{prefix}{' ' if prefix and prefix != ' ' else ''}{expressions}{suffix}"
    - 956        return ""
    - 957
    - 958    def with_properties(self, properties: exp.Properties) -> str:
    - 959        return self.properties(properties, prefix=self.seg("WITH"))
    - 960
    - 961    def locate_properties(
    - 962        self, properties: exp.Properties
    - 963    ) -> t.Dict[exp.Properties.Location, list[exp.Property]]:
    - 964        properties_locs: t.Dict[exp.Properties.Location, list[exp.Property]] = {
    - 965            key: [] for key in exp.Properties.Location
    - 966        }
    - 967
    - 968        for p in properties.expressions:
    - 969            p_loc = self.PROPERTIES_LOCATION[p.__class__]
    - 970            if p_loc == exp.Properties.Location.POST_NAME:
    - 971                properties_locs[exp.Properties.Location.POST_NAME].append(p)
    - 972            elif p_loc == exp.Properties.Location.POST_INDEX:
    - 973                properties_locs[exp.Properties.Location.POST_INDEX].append(p)
    - 974            elif p_loc == exp.Properties.Location.POST_SCHEMA:
    - 975                properties_locs[exp.Properties.Location.POST_SCHEMA].append(p)
    - 976            elif p_loc == exp.Properties.Location.POST_WITH:
    - 977                properties_locs[exp.Properties.Location.POST_WITH].append(p)
    - 978            elif p_loc == exp.Properties.Location.POST_CREATE:
    - 979                properties_locs[exp.Properties.Location.POST_CREATE].append(p)
    - 980            elif p_loc == exp.Properties.Location.POST_ALIAS:
    - 981                properties_locs[exp.Properties.Location.POST_ALIAS].append(p)
    - 982            elif p_loc == exp.Properties.Location.POST_EXPRESSION:
    - 983                properties_locs[exp.Properties.Location.POST_EXPRESSION].append(p)
    - 984            elif p_loc == exp.Properties.Location.UNSUPPORTED:
    - 985                self.unsupported(f"Unsupported property {p.key}")
    - 986
    - 987        return properties_locs
    + 928    def properties(
    + 929        self,
    + 930        properties: exp.Properties,
    + 931        prefix: str = "",
    + 932        sep: str = ", ",
    + 933        suffix: str = "",
    + 934        wrapped: bool = True,
    + 935    ) -> str:
    + 936        if properties.expressions:
    + 937            expressions = self.expressions(properties, sep=sep, indent=False)
    + 938            expressions = self.wrap(expressions) if wrapped else expressions
    + 939            return f"{prefix}{' ' if prefix and prefix != ' ' else ''}{expressions}{suffix}"
    + 940        return ""
    + 941
    + 942    def with_properties(self, properties: exp.Properties) -> str:
    + 943        return self.properties(properties, prefix=self.seg("WITH"))
    + 944
    + 945    def locate_properties(
    + 946        self, properties: exp.Properties
    + 947    ) -> t.Dict[exp.Properties.Location, list[exp.Property]]:
    + 948        properties_locs: t.Dict[exp.Properties.Location, list[exp.Property]] = {
    + 949            key: [] for key in exp.Properties.Location
    + 950        }
    + 951
    + 952        for p in properties.expressions:
    + 953            p_loc = self.PROPERTIES_LOCATION[p.__class__]
    + 954            if p_loc == exp.Properties.Location.POST_NAME:
    + 955                properties_locs[exp.Properties.Location.POST_NAME].append(p)
    + 956            elif p_loc == exp.Properties.Location.POST_INDEX:
    + 957                properties_locs[exp.Properties.Location.POST_INDEX].append(p)
    + 958            elif p_loc == exp.Properties.Location.POST_SCHEMA:
    + 959                properties_locs[exp.Properties.Location.POST_SCHEMA].append(p)
    + 960            elif p_loc == exp.Properties.Location.POST_WITH:
    + 961                properties_locs[exp.Properties.Location.POST_WITH].append(p)
    + 962            elif p_loc == exp.Properties.Location.POST_CREATE:
    + 963                properties_locs[exp.Properties.Location.POST_CREATE].append(p)
    + 964            elif p_loc == exp.Properties.Location.POST_ALIAS:
    + 965                properties_locs[exp.Properties.Location.POST_ALIAS].append(p)
    + 966            elif p_loc == exp.Properties.Location.POST_EXPRESSION:
    + 967                properties_locs[exp.Properties.Location.POST_EXPRESSION].append(p)
    + 968            elif p_loc == exp.Properties.Location.UNSUPPORTED:
    + 969                self.unsupported(f"Unsupported property {p.key}")
    + 970
    + 971        return properties_locs
    + 972
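locate_properties buckets every property by its PROPERTIES_LOCATION entry so create_sql can splice each group into the right spot. A hedged round-trip, assuming the Hive dialect (whose CREATE TABLE uses several of these locations):

import sqlglot

sql = "CREATE TABLE t (a INT) PARTITIONED BY (b STRING) TBLPROPERTIES ('x'='1')"
# Each property is re-emitted at the location chosen by locate_properties.
print(sqlglot.transpile(sql, read="hive", write="hive")[0])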
    + 973    def property_sql(self, expression: exp.Property) -> str:
    + 974        property_cls = expression.__class__
    + 975        if property_cls == exp.Property:
    + 976            return f"{expression.name}={self.sql(expression, 'value')}"
    + 977
    + 978        property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls)
    + 979        if not property_name:
    + 980            self.unsupported(f"Unsupported property {expression.key}")
    + 981
    + 982        return f"{property_name}={self.sql(expression, 'this')}"
    + 983
    + 984    def likeproperty_sql(self, expression: exp.LikeProperty) -> str:
    + 985        options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions)
    + 986        options = f" {options}" if options else ""
    + 987        return f"LIKE {self.sql(expression, 'this')}{options}"
      988
    - 989    def property_sql(self, expression: exp.Property) -> str:
    - 990        property_cls = expression.__class__
    - 991        if property_cls == exp.Property:
    - 992            return f"{expression.name}={self.sql(expression, 'value')}"
    + 989    def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str:
    + 990        no = "NO " if expression.args.get("no") else ""
    + 991        protection = " PROTECTION" if expression.args.get("protection") else ""
    + 992        return f"{no}FALLBACK{protection}"
      993
    - 994        property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls)
    - 995        if not property_name:
    - 996            self.unsupported(f"Unsupported property {expression.key}")
    - 997
    - 998        return f"{property_name}={self.sql(expression, 'this')}"
    - 999
    -1000    def likeproperty_sql(self, expression: exp.LikeProperty) -> str:
    -1001        options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions)
    -1002        options = f" {options}" if options else ""
    -1003        return f"LIKE {self.sql(expression, 'this')}{options}"
    -1004
    -1005    def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str:
    -1006        no = "NO " if expression.args.get("no") else ""
    -1007        protection = " PROTECTION" if expression.args.get("protection") else ""
    -1008        return f"{no}FALLBACK{protection}"
    -1009
    -1010    def journalproperty_sql(self, expression: exp.JournalProperty) -> str:
    -1011        no = "NO " if expression.args.get("no") else ""
    -1012        local = expression.args.get("local")
    -1013        local = f"{local} " if local else ""
    -1014        dual = "DUAL " if expression.args.get("dual") else ""
    -1015        before = "BEFORE " if expression.args.get("before") else ""
    -1016        after = "AFTER " if expression.args.get("after") else ""
    -1017        return f"{no}{local}{dual}{before}{after}JOURNAL"
    -1018
    -1019    def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str:
    -1020        freespace = self.sql(expression, "this")
    -1021        percent = " PERCENT" if expression.args.get("percent") else ""
    -1022        return f"FREESPACE={freespace}{percent}"
    -1023
    -1024    def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str:
    -1025        if expression.args.get("default"):
    -1026            property = "DEFAULT"
    -1027        elif expression.args.get("on"):
    -1028            property = "ON"
    -1029        else:
    -1030            property = "OFF"
    -1031        return f"CHECKSUM={property}"
    -1032
    -1033    def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str:
    -1034        if expression.args.get("no"):
    -1035            return "NO MERGEBLOCKRATIO"
    -1036        if expression.args.get("default"):
    -1037            return "DEFAULT MERGEBLOCKRATIO"
    -1038
    -1039        percent = " PERCENT" if expression.args.get("percent") else ""
    -1040        return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}"
    + 994    def journalproperty_sql(self, expression: exp.JournalProperty) -> str:
    + 995        no = "NO " if expression.args.get("no") else ""
    + 996        local = expression.args.get("local")
    + 997        local = f"{local} " if local else ""
    + 998        dual = "DUAL " if expression.args.get("dual") else ""
    + 999        before = "BEFORE " if expression.args.get("before") else ""
    +1000        after = "AFTER " if expression.args.get("after") else ""
    +1001        return f"{no}{local}{dual}{before}{after}JOURNAL"
    +1002
    +1003    def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str:
    +1004        freespace = self.sql(expression, "this")
    +1005        percent = " PERCENT" if expression.args.get("percent") else ""
    +1006        return f"FREESPACE={freespace}{percent}"
    +1007
    +1008    def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str:
    +1009        if expression.args.get("default"):
    +1010            property = "DEFAULT"
    +1011        elif expression.args.get("on"):
    +1012            property = "ON"
    +1013        else:
    +1014            property = "OFF"
    +1015        return f"CHECKSUM={property}"
    +1016
    +1017    def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str:
    +1018        if expression.args.get("no"):
    +1019            return "NO MERGEBLOCKRATIO"
    +1020        if expression.args.get("default"):
    +1021            return "DEFAULT MERGEBLOCKRATIO"
    +1022
    +1023        percent = " PERCENT" if expression.args.get("percent") else ""
    +1024        return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}"
    +1025
    +1026    def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str:
    +1027        default = expression.args.get("default")
    +1028        minimum = expression.args.get("minimum")
    +1029        maximum = expression.args.get("maximum")
    +1030        if default or minimum or maximum:
    +1031            if default:
    +1032                prop = "DEFAULT"
    +1033            elif minimum:
    +1034                prop = "MINIMUM"
    +1035            else:
    +1036                prop = "MAXIMUM"
    +1037            return f"{prop} DATABLOCKSIZE"
    +1038        units = expression.args.get("units")
    +1039        units = f" {units}" if units else ""
    +1040        return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}"
     1041
    -1042    def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str:
    -1043        default = expression.args.get("default")
    -1044        minimum = expression.args.get("minimum")
    -1045        maximum = expression.args.get("maximum")
    -1046        if default or minimum or maximum:
    -1047            if default:
    -1048                prop = "DEFAULT"
    -1049            elif minimum:
    -1050                prop = "MINIMUM"
    -1051            else:
    -1052                prop = "MAXIMUM"
    -1053            return f"{prop} DATABLOCKSIZE"
    -1054        units = expression.args.get("units")
    -1055        units = f" {units}" if units else ""
    -1056        return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}"
    -1057
    -1058    def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str:
    -1059        autotemp = expression.args.get("autotemp")
    -1060        always = expression.args.get("always")
    -1061        default = expression.args.get("default")
    -1062        manual = expression.args.get("manual")
    -1063        never = expression.args.get("never")
    -1064
    -1065        if autotemp is not None:
    -1066            prop = f"AUTOTEMP({self.expressions(autotemp)})"
    -1067        elif always:
    -1068            prop = "ALWAYS"
    -1069        elif default:
    -1070            prop = "DEFAULT"
    -1071        elif manual:
    -1072            prop = "MANUAL"
    -1073        elif never:
    -1074            prop = "NEVER"
    -1075        return f"BLOCKCOMPRESSION={prop}"
    -1076
    -1077    def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str:
    -1078        no = expression.args.get("no")
    -1079        no = " NO" if no else ""
    -1080        concurrent = expression.args.get("concurrent")
    -1081        concurrent = " CONCURRENT" if concurrent else ""
    -1082
    -1083        for_ = ""
    -1084        if expression.args.get("for_all"):
    -1085            for_ = " FOR ALL"
    -1086        elif expression.args.get("for_insert"):
    -1087            for_ = " FOR INSERT"
    -1088        elif expression.args.get("for_none"):
    -1089            for_ = " FOR NONE"
    -1090        return f"WITH{no}{concurrent} ISOLATED LOADING{for_}"
    +1042    def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str:
    +1043        autotemp = expression.args.get("autotemp")
    +1044        always = expression.args.get("always")
    +1045        default = expression.args.get("default")
    +1046        manual = expression.args.get("manual")
    +1047        never = expression.args.get("never")
    +1048
    +1049        if autotemp is not None:
    +1050            prop = f"AUTOTEMP({self.expressions(autotemp)})"
    +1051        elif always:
    +1052            prop = "ALWAYS"
    +1053        elif default:
    +1054            prop = "DEFAULT"
    +1055        elif manual:
    +1056            prop = "MANUAL"
    +1057        elif never:
    +1058            prop = "NEVER"
    +1059        return f"BLOCKCOMPRESSION={prop}"
    +1060
    +1061    def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str:
    +1062        no = expression.args.get("no")
    +1063        no = " NO" if no else ""
    +1064        concurrent = expression.args.get("concurrent")
    +1065        concurrent = " CONCURRENT" if concurrent else ""
    +1066
    +1067        for_ = ""
    +1068        if expression.args.get("for_all"):
    +1069            for_ = " FOR ALL"
    +1070        elif expression.args.get("for_insert"):
    +1071            for_ = " FOR INSERT"
    +1072        elif expression.args.get("for_none"):
    +1073            for_ = " FOR NONE"
    +1074        return f"WITH{no}{concurrent} ISOLATED LOADING{for_}"
    +1075
    +1076    def lockingproperty_sql(self, expression: exp.LockingProperty) -> str:
    +1077        kind = expression.args.get("kind")
    +1078        this = f" {self.sql(expression, 'this')}" if expression.this else ""
    +1079        for_or_in = expression.args.get("for_or_in")
    +1080        lock_type = expression.args.get("lock_type")
    +1081        override = " OVERRIDE" if expression.args.get("override") else ""
    +1082        return f"LOCKING {kind}{this} {for_or_in} {lock_type}{override}"
    +1083
    +1084    def withdataproperty_sql(self, expression: exp.WithDataProperty) -> str:
    +1085        data_sql = f"WITH {'NO ' if expression.args.get('no') else ''}DATA"
    +1086        statistics = expression.args.get("statistics")
    +1087        statistics_sql = ""
    +1088        if statistics is not None:
    +1089            statistics_sql = f" AND {'NO ' if not statistics else ''}STATISTICS"
    +1090        return f"{data_sql}{statistics_sql}"
     1091
    -1092    def lockingproperty_sql(self, expression: exp.LockingProperty) -> str:
    -1093        kind = expression.args.get("kind")
    -1094        this = f" {self.sql(expression, 'this')}" if expression.this else ""
    -1095        for_or_in = expression.args.get("for_or_in")
    -1096        lock_type = expression.args.get("lock_type")
    -1097        override = " OVERRIDE" if expression.args.get("override") else ""
    -1098        return f"LOCKING {kind}{this} {for_or_in} {lock_type}{override}"
    +1092    def insert_sql(self, expression: exp.Insert) -> str:
    +1093        overwrite = expression.args.get("overwrite")
    +1094
    +1095        if isinstance(expression.this, exp.Directory):
    +1096            this = "OVERWRITE " if overwrite else "INTO "
    +1097        else:
    +1098            this = "OVERWRITE TABLE " if overwrite else "INTO "
     1099
    -1100    def withdataproperty_sql(self, expression: exp.WithDataProperty) -> str:
    -1101        data_sql = f"WITH {'NO ' if expression.args.get('no') else ''}DATA"
    -1102        statistics = expression.args.get("statistics")
    -1103        statistics_sql = ""
    -1104        if statistics is not None:
    -1105            statistics_sql = f" AND {'NO ' if not statistics else ''}STATISTICS"
    -1106        return f"{data_sql}{statistics_sql}"
    -1107
    -1108    def insert_sql(self, expression: exp.Insert) -> str:
    -1109        overwrite = expression.args.get("overwrite")
    -1110
    -1111        if isinstance(expression.this, exp.Directory):
    -1112            this = "OVERWRITE " if overwrite else "INTO "
    -1113        else:
    -1114            this = "OVERWRITE TABLE " if overwrite else "INTO "
    -1115
    -1116        alternative = expression.args.get("alternative")
    -1117        alternative = f" OR {alternative} " if alternative else " "
    -1118        this = f"{this}{self.sql(expression, 'this')}"
    -1119
    -1120        exists = " IF EXISTS " if expression.args.get("exists") else " "
    -1121        partition_sql = (
    -1122            self.sql(expression, "partition") if expression.args.get("partition") else ""
    -1123        )
    -1124        expression_sql = self.sql(expression, "expression")
    -1125        conflict = self.sql(expression, "conflict")
    -1126        returning = self.sql(expression, "returning")
    -1127        sep = self.sep() if partition_sql else ""
    -1128        sql = f"INSERT{alternative}{this}{exists}{partition_sql}{sep}{expression_sql}{conflict}{returning}"
    -1129        return self.prepend_ctes(expression, sql)
    -1130
    -1131    def intersect_sql(self, expression: exp.Intersect) -> str:
    -1132        return self.prepend_ctes(
    -1133            expression,
    -1134            self.set_operation(expression, self.intersect_op(expression)),
    -1135        )
    -1136
    -1137    def intersect_op(self, expression: exp.Intersect) -> str:
    -1138        return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}"
    -1139
    -1140    def introducer_sql(self, expression: exp.Introducer) -> str:
    -1141        return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
    +1100        alternative = expression.args.get("alternative")
    +1101        alternative = f" OR {alternative} " if alternative else " "
    +1102        this = f"{this}{self.sql(expression, 'this')}"
    +1103
    +1104        exists = " IF EXISTS " if expression.args.get("exists") else " "
    +1105        partition_sql = (
    +1106            self.sql(expression, "partition") if expression.args.get("partition") else ""
    +1107        )
    +1108        expression_sql = self.sql(expression, "expression")
    +1109        conflict = self.sql(expression, "conflict")
    +1110        returning = self.sql(expression, "returning")
    +1111        sep = self.sep() if partition_sql else ""
    +1112        sql = f"INSERT{alternative}{this}{exists}{partition_sql}{sep}{expression_sql}{conflict}{returning}"
    +1113        return self.prepend_ctes(expression, sql)
    +1114
    +1115    def intersect_sql(self, expression: exp.Intersect) -> str:
    +1116        return self.prepend_ctes(
    +1117            expression,
    +1118            self.set_operation(expression, self.intersect_op(expression)),
    +1119        )
    +1120
    +1121    def intersect_op(self, expression: exp.Intersect) -> str:
    +1122        return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}"
    +1123
    +1124    def introducer_sql(self, expression: exp.Introducer) -> str:
    +1125        return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
    +1126
    +1127    def pseudotype_sql(self, expression: exp.PseudoType) -> str:
    +1128        return expression.name.upper()
    +1129
    +1130    def onconflict_sql(self, expression: exp.OnConflict) -> str:
    +1131        conflict = "ON DUPLICATE KEY" if expression.args.get("duplicate") else "ON CONFLICT"
    +1132        constraint = self.sql(expression, "constraint")
    +1133        if constraint:
    +1134            constraint = f"ON CONSTRAINT {constraint}"
    +1135        key = self.expressions(expression, key="key", flat=True)
    +1136        do = "" if expression.args.get("duplicate") else " DO "
    +1137        nothing = "NOTHING" if expression.args.get("nothing") else ""
    +1138        expressions = self.expressions(expression, flat=True)
    +1139        if expressions:
    +1140            expressions = f"UPDATE SET {expressions}"
    +1141        return f"{self.seg(conflict)} {constraint}{key}{do}{nothing}{expressions}"
     1142
    -1143    def pseudotype_sql(self, expression: exp.PseudoType) -> str:
    -1144        return expression.name.upper()
    +1143    def returning_sql(self, expression: exp.Returning) -> str:
    +1144        return f"{self.seg('RETURNING')} {self.expressions(expression, flat=True)}"
     1145
    -1146    def onconflict_sql(self, expression: exp.OnConflict) -> str:
    -1147        conflict = "ON DUPLICATE KEY" if expression.args.get("duplicate") else "ON CONFLICT"
    -1148        constraint = self.sql(expression, "constraint")
    -1149        if constraint:
    -1150            constraint = f"ON CONSTRAINT {constraint}"
    -1151        key = self.expressions(expression, key="key", flat=True)
    -1152        do = "" if expression.args.get("duplicate") else " DO "
    -1153        nothing = "NOTHING" if expression.args.get("nothing") else ""
    -1154        expressions = self.expressions(expression, flat=True)
    -1155        if expressions:
    -1156            expressions = f"UPDATE SET {expressions}"
    -1157        return f"{self.seg(conflict)} {constraint}{key}{do}{nothing}{expressions}"
    -1158
    -1159    def returning_sql(self, expression: exp.Returning) -> str:
    -1160        return f"{self.seg('RETURNING')} {self.expressions(expression, flat=True)}"
    -1161
    -1162    def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str:
    -1163        fields = expression.args.get("fields")
    -1164        fields = f" FIELDS TERMINATED BY {fields}" if fields else ""
    -1165        escaped = expression.args.get("escaped")
    -1166        escaped = f" ESCAPED BY {escaped}" if escaped else ""
    -1167        items = expression.args.get("collection_items")
    -1168        items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else ""
    -1169        keys = expression.args.get("map_keys")
    -1170        keys = f" MAP KEYS TERMINATED BY {keys}" if keys else ""
    -1171        lines = expression.args.get("lines")
    -1172        lines = f" LINES TERMINATED BY {lines}" if lines else ""
    -1173        null = expression.args.get("null")
    -1174        null = f" NULL DEFINED AS {null}" if null else ""
    -1175        return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}"
    -1176
    -1177    def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
    -1178        table = ".".join(
    -1179            part
    -1180            for part in [
    -1181                self.sql(expression, "catalog"),
    -1182                self.sql(expression, "db"),
    -1183                self.sql(expression, "this"),
    -1184            ]
    -1185            if part
    -1186        )
    -1187
    -1188        alias = self.sql(expression, "alias")
    -1189        alias = f"{sep}{alias}" if alias else ""
    -1190        hints = self.expressions(expression, key="hints", flat=True)
    -1191        hints = f" WITH ({hints})" if hints and self.TABLE_HINTS else ""
    -1192        pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
    -1193        pivots = f" {pivots}" if pivots else ""
    -1194        joins = self.expressions(expression, key="joins", sep="")
    -1195        laterals = self.expressions(expression, key="laterals", sep="")
    -1196        system_time = expression.args.get("system_time")
    -1197        system_time = f" {self.sql(expression, 'system_time')}" if system_time else ""
    -1198
    -1199        return f"{table}{system_time}{alias}{hints}{pivots}{joins}{laterals}"
    -1200
    -1201    def tablesample_sql(
    -1202        self, expression: exp.TableSample, seed_prefix: str = "SEED", sep=" AS "
    -1203    ) -> str:
    -1204        if self.alias_post_tablesample and expression.this.alias:
    -1205            table = expression.this.copy()
    -1206            table.set("alias", None)
    -1207            this = self.sql(table)
    -1208            alias = f"{sep}{self.sql(expression.this, 'alias')}"
    -1209        else:
    -1210            this = self.sql(expression, "this")
    -1211            alias = ""
    -1212        method = self.sql(expression, "method")
    -1213        method = f"{method.upper()} " if method and self.TABLESAMPLE_WITH_METHOD else ""
    -1214        numerator = self.sql(expression, "bucket_numerator")
    -1215        denominator = self.sql(expression, "bucket_denominator")
    -1216        field = self.sql(expression, "bucket_field")
    -1217        field = f" ON {field}" if field else ""
    -1218        bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else ""
    -1219        percent = self.sql(expression, "percent")
    -1220        percent = f"{percent} PERCENT" if percent else ""
    -1221        rows = self.sql(expression, "rows")
    -1222        rows = f"{rows} ROWS" if rows else ""
    -1223        size = self.sql(expression, "size")
    -1224        if size and self.TABLESAMPLE_SIZE_IS_PERCENT:
    -1225            size = f"{size} PERCENT"
    -1226        seed = self.sql(expression, "seed")
    -1227        seed = f" {seed_prefix} ({seed})" if seed else ""
    -1228        kind = expression.args.get("kind", "TABLESAMPLE")
    -1229        return f"{this} {kind} {method}({bucket}{percent}{rows}{size}){seed}{alias}"
    -1230
    -1231    def pivot_sql(self, expression: exp.Pivot) -> str:
    -1232        expressions = self.expressions(expression, flat=True)
    -1233
    -1234        if expression.this:
    -1235            this = self.sql(expression, "this")
    -1236            on = f"{self.seg('ON')} {expressions}"
    -1237            using = self.expressions(expression, key="using", flat=True)
    -1238            using = f"{self.seg('USING')} {using}" if using else ""
    -1239            group = self.sql(expression, "group")
    -1240            return f"PIVOT {this}{on}{using}{group}"
    -1241
    -1242        alias = self.sql(expression, "alias")
    -1243        alias = f" AS {alias}" if alias else ""
    -1244        unpivot = expression.args.get("unpivot")
    -1245        direction = "UNPIVOT" if unpivot else "PIVOT"
    -1246        field = self.sql(expression, "field")
    -1247        return f"{direction}({expressions} FOR {field}){alias}"
    -1248
    -1249    def tuple_sql(self, expression: exp.Tuple) -> str:
    -1250        return f"({self.expressions(expression, flat=True)})"
    -1251
    -1252    def update_sql(self, expression: exp.Update) -> str:
    -1253        this = self.sql(expression, "this")
    -1254        set_sql = self.expressions(expression, flat=True)
    -1255        from_sql = self.sql(expression, "from")
    -1256        where_sql = self.sql(expression, "where")
    -1257        returning = self.sql(expression, "returning")
    -1258        sql = f"UPDATE {this} SET {set_sql}{from_sql}{where_sql}{returning}"
    -1259        return self.prepend_ctes(expression, sql)
    -1260
    -1261    def values_sql(self, expression: exp.Values) -> str:
    -1262        args = self.expressions(expression)
    -1263        alias = self.sql(expression, "alias")
    -1264        values = f"VALUES{self.seg('')}{args}"
    -1265        values = (
    -1266            f"({values})"
    -1267            if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From))
    -1268            else values
    -1269        )
    -1270        return f"{values} AS {alias}" if alias else values
    -1271
    -1272    def var_sql(self, expression: exp.Var) -> str:
    -1273        return self.sql(expression, "this")
    -1274
    -1275    def into_sql(self, expression: exp.Into) -> str:
    -1276        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
    -1277        unlogged = " UNLOGGED" if expression.args.get("unlogged") else ""
    -1278        return f"{self.seg('INTO')}{temporary or unlogged} {self.sql(expression, 'this')}"
    -1279
    -1280    def from_sql(self, expression: exp.From) -> str:
    -1281        return f"{self.seg('FROM')} {self.sql(expression, 'this')}"
    -1282
    -1283    def group_sql(self, expression: exp.Group) -> str:
    -1284        group_by = self.op_expressions("GROUP BY", expression)
    -1285        grouping_sets = self.expressions(expression, key="grouping_sets", indent=False)
    -1286        grouping_sets = (
    -1287            f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else ""
    -1288        )
    -1289
    -1290        cube = expression.args.get("cube", [])
    -1291        if seq_get(cube, 0) is True:
    -1292            return f"{group_by}{self.seg('WITH CUBE')}"
    -1293        else:
    -1294            cube_sql = self.expressions(expression, key="cube", indent=False)
    -1295            cube_sql = f"{self.seg('CUBE')} {self.wrap(cube_sql)}" if cube_sql else ""
    -1296
    -1297        rollup = expression.args.get("rollup", [])
    -1298        if seq_get(rollup, 0) is True:
    -1299            return f"{group_by}{self.seg('WITH ROLLUP')}"
    -1300        else:
    -1301            rollup_sql = self.expressions(expression, key="rollup", indent=False)
    -1302            rollup_sql = f"{self.seg('ROLLUP')} {self.wrap(rollup_sql)}" if rollup_sql else ""
    -1303
    -1304        groupings = csv(
    -1305            grouping_sets,
    -1306            cube_sql,
    -1307            rollup_sql,
    -1308            self.seg("WITH TOTALS") if expression.args.get("totals") else "",
    -1309            sep=self.GROUPINGS_SEP,
    -1310        )
    -1311
    -1312        if expression.args.get("expressions") and groupings:
    -1313            group_by = f"{group_by}{self.GROUPINGS_SEP}"
    -1314
    -1315        return f"{group_by}{groupings}"
    -1316
    -1317    def having_sql(self, expression: exp.Having) -> str:
    -1318        this = self.indent(self.sql(expression, "this"))
    -1319        return f"{self.seg('HAVING')}{self.sep()}{this}"
    -1320
    -1321    def join_sql(self, expression: exp.Join) -> str:
    -1322        op_sql = " ".join(
    -1323            op
    -1324            for op in (
    -1325                expression.method,
    -1326                "GLOBAL" if expression.args.get("global") else None,
    -1327                expression.side,
    -1328                expression.kind,
    -1329                expression.hint if self.JOIN_HINTS else None,
    -1330            )
    -1331            if op
    -1332        )
    -1333        on_sql = self.sql(expression, "on")
    -1334        using = expression.args.get("using")
    -1335
    -1336        if not on_sql and using:
    -1337            on_sql = csv(*(self.sql(column) for column in using))
    -1338
    -1339        this_sql = self.sql(expression, "this")
    -1340
    -1341        if on_sql:
    -1342            on_sql = self.indent(on_sql, skip_first=True)
    -1343            space = self.seg(" " * self.pad) if self.pretty else " "
    -1344            if using:
    -1345                on_sql = f"{space}USING ({on_sql})"
    -1346            else:
    -1347                on_sql = f"{space}ON {on_sql}"
    -1348        elif not op_sql:
    -1349            return f", {this_sql}"
    -1350
    -1351        op_sql = f"{op_sql} JOIN" if op_sql else "JOIN"
    -1352        return f"{self.seg(op_sql)} {this_sql}{on_sql}"
    -1353
    -1354    def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str:
    -1355        args = self.expressions(expression, flat=True)
    -1356        args = f"({args})" if len(args.split(",")) > 1 else args
    -1357        return f"{args} {arrow_sep} {self.sql(expression, 'this')}"
    -1358
    -1359    def lateral_sql(self, expression: exp.Lateral) -> str:
    -1360        this = self.sql(expression, "this")
    -1361
    -1362        if isinstance(expression.this, exp.Subquery):
    -1363            return f"LATERAL {this}"
    -1364
    -1365        if expression.args.get("view"):
    -1366            alias = expression.args["alias"]
    -1367            columns = self.expressions(alias, key="columns", flat=True)
    -1368            table = f" {alias.name}" if alias.name else ""
    -1369            columns = f" AS {columns}" if columns else ""
    -1370            op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}")
    -1371            return f"{op_sql}{self.sep()}{this}{table}{columns}"
    +1146    def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str:
    +1147        fields = expression.args.get("fields")
    +1148        fields = f" FIELDS TERMINATED BY {fields}" if fields else ""
    +1149        escaped = expression.args.get("escaped")
    +1150        escaped = f" ESCAPED BY {escaped}" if escaped else ""
    +1151        items = expression.args.get("collection_items")
    +1152        items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else ""
    +1153        keys = expression.args.get("map_keys")
    +1154        keys = f" MAP KEYS TERMINATED BY {keys}" if keys else ""
    +1155        lines = expression.args.get("lines")
    +1156        lines = f" LINES TERMINATED BY {lines}" if lines else ""
    +1157        null = expression.args.get("null")
    +1158        null = f" NULL DEFINED AS {null}" if null else ""
    +1159        return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}"
    +1160
    +1161    def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
    +1162        table = ".".join(
    +1163            part
    +1164            for part in [
    +1165                self.sql(expression, "catalog"),
    +1166                self.sql(expression, "db"),
    +1167                self.sql(expression, "this"),
    +1168            ]
    +1169            if part
    +1170        )
    +1171
    +1172        alias = self.sql(expression, "alias")
    +1173        alias = f"{sep}{alias}" if alias else ""
    +1174        hints = self.expressions(expression, key="hints", flat=True)
    +1175        hints = f" WITH ({hints})" if hints and self.TABLE_HINTS else ""
    +1176        pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
    +1177        pivots = f" {pivots}" if pivots else ""
    +1178        joins = self.expressions(expression, key="joins", sep="")
    +1179        laterals = self.expressions(expression, key="laterals", sep="")
    +1180        system_time = expression.args.get("system_time")
    +1181        system_time = f" {self.sql(expression, 'system_time')}" if system_time else ""
    +1182
    +1183        return f"{table}{system_time}{alias}{hints}{pivots}{joins}{laterals}"
    +1184
    +1185    def tablesample_sql(
    +1186        self, expression: exp.TableSample, seed_prefix: str = "SEED", sep=" AS "
    +1187    ) -> str:
    +1188        if self.ALIAS_POST_TABLESAMPLE and expression.this.alias:
    +1189            table = expression.this.copy()
    +1190            table.set("alias", None)
    +1191            this = self.sql(table)
    +1192            alias = f"{sep}{self.sql(expression.this, 'alias')}"
    +1193        else:
    +1194            this = self.sql(expression, "this")
    +1195            alias = ""
    +1196        method = self.sql(expression, "method")
    +1197        method = f"{method.upper()} " if method and self.TABLESAMPLE_WITH_METHOD else ""
    +1198        numerator = self.sql(expression, "bucket_numerator")
    +1199        denominator = self.sql(expression, "bucket_denominator")
    +1200        field = self.sql(expression, "bucket_field")
    +1201        field = f" ON {field}" if field else ""
    +1202        bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else ""
    +1203        percent = self.sql(expression, "percent")
    +1204        percent = f"{percent} PERCENT" if percent else ""
    +1205        rows = self.sql(expression, "rows")
    +1206        rows = f"{rows} ROWS" if rows else ""
    +1207        size = self.sql(expression, "size")
    +1208        if size and self.TABLESAMPLE_SIZE_IS_PERCENT:
    +1209            size = f"{size} PERCENT"
    +1210        seed = self.sql(expression, "seed")
    +1211        seed = f" {seed_prefix} ({seed})" if seed else ""
    +1212        kind = expression.args.get("kind", "TABLESAMPLE")
    +1213        return f"{this} {kind} {method}({bucket}{percent}{rows}{size}){seed}{alias}"
    +1214
    +1215    def pivot_sql(self, expression: exp.Pivot) -> str:
    +1216        expressions = self.expressions(expression, flat=True)
    +1217
    +1218        if expression.this:
    +1219            this = self.sql(expression, "this")
    +1220            on = f"{self.seg('ON')} {expressions}"
    +1221            using = self.expressions(expression, key="using", flat=True)
    +1222            using = f"{self.seg('USING')} {using}" if using else ""
    +1223            group = self.sql(expression, "group")
    +1224            return f"PIVOT {this}{on}{using}{group}"
    +1225
    +1226        alias = self.sql(expression, "alias")
    +1227        alias = f" AS {alias}" if alias else ""
    +1228        unpivot = expression.args.get("unpivot")
    +1229        direction = "UNPIVOT" if unpivot else "PIVOT"
    +1230        field = self.sql(expression, "field")
    +1231        return f"{direction}({expressions} FOR {field}){alias}"
    +1232
    +1233    def tuple_sql(self, expression: exp.Tuple) -> str:
    +1234        return f"({self.expressions(expression, flat=True)})"
    +1235
    +1236    def update_sql(self, expression: exp.Update) -> str:
    +1237        this = self.sql(expression, "this")
    +1238        set_sql = self.expressions(expression, flat=True)
    +1239        from_sql = self.sql(expression, "from")
    +1240        where_sql = self.sql(expression, "where")
    +1241        returning = self.sql(expression, "returning")
    +1242        sql = f"UPDATE {this} SET {set_sql}{from_sql}{where_sql}{returning}"
    +1243        return self.prepend_ctes(expression, sql)
    +1244
    +1245    def values_sql(self, expression: exp.Values) -> str:
    +1246        args = self.expressions(expression)
    +1247        alias = self.sql(expression, "alias")
    +1248        values = f"VALUES{self.seg('')}{args}"
    +1249        values = (
    +1250            f"({values})"
    +1251            if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From))
    +1252            else values
    +1253        )
    +1254        return f"{values} AS {alias}" if alias else values
    +1255
    +1256    def var_sql(self, expression: exp.Var) -> str:
    +1257        return self.sql(expression, "this")
    +1258
    +1259    def into_sql(self, expression: exp.Into) -> str:
    +1260        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
    +1261        unlogged = " UNLOGGED" if expression.args.get("unlogged") else ""
    +1262        return f"{self.seg('INTO')}{temporary or unlogged} {self.sql(expression, 'this')}"
    +1263
    +1264    def from_sql(self, expression: exp.From) -> str:
    +1265        return f"{self.seg('FROM')} {self.sql(expression, 'this')}"
    +1266
    +1267    def group_sql(self, expression: exp.Group) -> str:
    +1268        group_by = self.op_expressions("GROUP BY", expression)
    +1269        grouping_sets = self.expressions(expression, key="grouping_sets", indent=False)
    +1270        grouping_sets = (
    +1271            f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else ""
    +1272        )
    +1273
    +1274        cube = expression.args.get("cube", [])
    +1275        if seq_get(cube, 0) is True:
    +1276            return f"{group_by}{self.seg('WITH CUBE')}"
    +1277        else:
    +1278            cube_sql = self.expressions(expression, key="cube", indent=False)
    +1279            cube_sql = f"{self.seg('CUBE')} {self.wrap(cube_sql)}" if cube_sql else ""
    +1280
    +1281        rollup = expression.args.get("rollup", [])
    +1282        if seq_get(rollup, 0) is True:
    +1283            return f"{group_by}{self.seg('WITH ROLLUP')}"
    +1284        else:
    +1285            rollup_sql = self.expressions(expression, key="rollup", indent=False)
    +1286            rollup_sql = f"{self.seg('ROLLUP')} {self.wrap(rollup_sql)}" if rollup_sql else ""
    +1287
    +1288        groupings = csv(
    +1289            grouping_sets,
    +1290            cube_sql,
    +1291            rollup_sql,
    +1292            self.seg("WITH TOTALS") if expression.args.get("totals") else "",
    +1293            sep=self.GROUPINGS_SEP,
    +1294        )
    +1295
    +1296        if expression.args.get("expressions") and groupings:
    +1297            group_by = f"{group_by}{self.GROUPINGS_SEP}"
    +1298
    +1299        return f"{group_by}{groupings}"
    +1300
    +1301    def having_sql(self, expression: exp.Having) -> str:
    +1302        this = self.indent(self.sql(expression, "this"))
    +1303        return f"{self.seg('HAVING')}{self.sep()}{this}"
    +1304
    +1305    def join_sql(self, expression: exp.Join) -> str:
    +1306        op_sql = " ".join(
    +1307            op
    +1308            for op in (
    +1309                expression.method,
    +1310                "GLOBAL" if expression.args.get("global") else None,
    +1311                expression.side,
    +1312                expression.kind,
    +1313                expression.hint if self.JOIN_HINTS else None,
    +1314            )
    +1315            if op
    +1316        )
    +1317        on_sql = self.sql(expression, "on")
    +1318        using = expression.args.get("using")
    +1319
    +1320        if not on_sql and using:
    +1321            on_sql = csv(*(self.sql(column) for column in using))
    +1322
    +1323        this_sql = self.sql(expression, "this")
    +1324
    +1325        if on_sql:
    +1326            on_sql = self.indent(on_sql, skip_first=True)
    +1327            space = self.seg(" " * self.pad) if self.pretty else " "
    +1328            if using:
    +1329                on_sql = f"{space}USING ({on_sql})"
    +1330            else:
    +1331                on_sql = f"{space}ON {on_sql}"
    +1332        elif not op_sql:
    +1333            return f", {this_sql}"
    +1334
    +1335        op_sql = f"{op_sql} JOIN" if op_sql else "JOIN"
    +1336        return f"{self.seg(op_sql)} {this_sql}{on_sql}"
    +1337
    +1338    def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str:
    +1339        args = self.expressions(expression, flat=True)
    +1340        args = f"({args})" if len(args.split(",")) > 1 else args
    +1341        return f"{args} {arrow_sep} {self.sql(expression, 'this')}"
    +1342
    +1343    def lateral_sql(self, expression: exp.Lateral) -> str:
    +1344        this = self.sql(expression, "this")
    +1345
    +1346        if isinstance(expression.this, exp.Subquery):
    +1347            return f"LATERAL {this}"
    +1348
    +1349        if expression.args.get("view"):
    +1350            alias = expression.args["alias"]
    +1351            columns = self.expressions(alias, key="columns", flat=True)
    +1352            table = f" {alias.name}" if alias.name else ""
    +1353            columns = f" AS {columns}" if columns else ""
    +1354            op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}")
    +1355            return f"{op_sql}{self.sep()}{this}{table}{columns}"
    +1356
    +1357        alias = self.sql(expression, "alias")
    +1358        alias = f" AS {alias}" if alias else ""
    +1359        return f"LATERAL {this}{alias}"
    +1360
    +1361    def limit_sql(self, expression: exp.Limit) -> str:
    +1362        this = self.sql(expression, "this")
    +1363        args = ", ".join(
    +1364            sql
    +1365            for sql in (
    +1366                self.sql(expression, "offset"),
    +1367                self.sql(expression, "expression"),
    +1368            )
    +1369            if sql
    +1370        )
    +1371        return f"{this}{self.seg('LIMIT')} {args}"
     1372
    -1373        alias = self.sql(expression, "alias")
    -1374        alias = f" AS {alias}" if alias else ""
    -1375        return f"LATERAL {this}{alias}"
    +1373    def offset_sql(self, expression: exp.Offset) -> str:
    +1374        this = self.sql(expression, "this")
    +1375        return f"{this}{self.seg('OFFSET')} {self.sql(expression, 'expression')}"
     1376
    -1377    def limit_sql(self, expression: exp.Limit) -> str:
    -1378        this = self.sql(expression, "this")
    -1379        return f"{this}{self.seg('LIMIT')} {self.sql(expression, 'expression')}"
    -1380
    -1381    def offset_sql(self, expression: exp.Offset) -> str:
    -1382        this = self.sql(expression, "this")
    -1383        return f"{this}{self.seg('OFFSET')} {self.sql(expression, 'expression')}"
    -1384
    -1385    def setitem_sql(self, expression: exp.SetItem) -> str:
    -1386        kind = self.sql(expression, "kind")
    -1387        kind = f"{kind} " if kind else ""
    -1388        this = self.sql(expression, "this")
    -1389        expressions = self.expressions(expression)
    -1390        collate = self.sql(expression, "collate")
    -1391        collate = f" COLLATE {collate}" if collate else ""
    -1392        global_ = "GLOBAL " if expression.args.get("global") else ""
    -1393        return f"{global_}{kind}{this}{expressions}{collate}"
    -1394
    -1395    def set_sql(self, expression: exp.Set) -> str:
    -1396        expressions = (
    -1397            f" {self.expressions(expression, flat=True)}" if expression.expressions else ""
    -1398        )
    -1399        return f"SET{expressions}"
    +1377    def setitem_sql(self, expression: exp.SetItem) -> str:
    +1378        kind = self.sql(expression, "kind")
    +1379        kind = f"{kind} " if kind else ""
    +1380        this = self.sql(expression, "this")
    +1381        expressions = self.expressions(expression)
    +1382        collate = self.sql(expression, "collate")
    +1383        collate = f" COLLATE {collate}" if collate else ""
    +1384        global_ = "GLOBAL " if expression.args.get("global") else ""
    +1385        return f"{global_}{kind}{this}{expressions}{collate}"
    +1386
    +1387    def set_sql(self, expression: exp.Set) -> str:
    +1388        expressions = (
    +1389            f" {self.expressions(expression, flat=True)}" if expression.expressions else ""
    +1390        )
    +1391        return f"SET{expressions}"
    +1392
    +1393    def pragma_sql(self, expression: exp.Pragma) -> str:
    +1394        return f"PRAGMA {self.sql(expression, 'this')}"
    +1395
    +1396    def lock_sql(self, expression: exp.Lock) -> str:
    +1397        if not self.LOCKING_READS_SUPPORTED:
    +1398            self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported")
    +1399            return ""
     1400
    -1401    def pragma_sql(self, expression: exp.Pragma) -> str:
    -1402        return f"PRAGMA {self.sql(expression, 'this')}"
    -1403
    -1404    def lock_sql(self, expression: exp.Lock) -> str:
    -1405        if not self.LOCKING_READS_SUPPORTED:
    -1406            self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported")
    -1407            return ""
    -1408
    -1409        lock_type = "FOR UPDATE" if expression.args["update"] else "FOR SHARE"
    -1410        expressions = self.expressions(expression, flat=True)
    -1411        expressions = f" OF {expressions}" if expressions else ""
    -1412        wait = expression.args.get("wait")
    +1401        lock_type = "FOR UPDATE" if expression.args["update"] else "FOR SHARE"
    +1402        expressions = self.expressions(expression, flat=True)
    +1403        expressions = f" OF {expressions}" if expressions else ""
    +1404        wait = expression.args.get("wait")
    +1405
    +1406        if wait is not None:
    +1407            if isinstance(wait, exp.Literal):
    +1408                wait = f" WAIT {self.sql(wait)}"
    +1409            else:
    +1410                wait = " NOWAIT" if wait else " SKIP LOCKED"
    +1411
    +1412        return f"{lock_type}{expressions}{wait or ''}"
     1413
    -1414        if wait is not None:
    -1415            if isinstance(wait, exp.Literal):
    -1416                wait = f" WAIT {self.sql(wait)}"
    -1417            else:
    -1418                wait = " NOWAIT" if wait else " SKIP LOCKED"
    -1419
    -1420        return f"{lock_type}{expressions}{wait or ''}"
    -1421
    -1422    def literal_sql(self, expression: exp.Literal) -> str:
    -1423        text = expression.this or ""
    -1424        if expression.is_string:
    -1425            text = text.replace(self.quote_end, self._escaped_quote_end)
    -1426            if self.pretty:
    -1427                text = text.replace("\n", self.SENTINEL_LINE_BREAK)
    -1428            text = f"{self.quote_start}{text}{self.quote_end}"
    -1429        return text
    -1430
    -1431    def loaddata_sql(self, expression: exp.LoadData) -> str:
    -1432        local = " LOCAL" if expression.args.get("local") else ""
    -1433        inpath = f" INPATH {self.sql(expression, 'inpath')}"
    -1434        overwrite = " OVERWRITE" if expression.args.get("overwrite") else ""
    -1435        this = f" INTO TABLE {self.sql(expression, 'this')}"
    -1436        partition = self.sql(expression, "partition")
    -1437        partition = f" {partition}" if partition else ""
    -1438        input_format = self.sql(expression, "input_format")
    -1439        input_format = f" INPUTFORMAT {input_format}" if input_format else ""
    -1440        serde = self.sql(expression, "serde")
    -1441        serde = f" SERDE {serde}" if serde else ""
    -1442        return f"LOAD DATA{local}{inpath}{overwrite}{this}{partition}{input_format}{serde}"
    -1443
    -1444    def null_sql(self, *_) -> str:
    -1445        return "NULL"
    +1414    def literal_sql(self, expression: exp.Literal) -> str:
    +1415        text = expression.this or ""
    +1416        if expression.is_string:
    +1417            text = text.replace(self.QUOTE_END, self._escaped_quote_end)
    +1418            if self.pretty:
    +1419                text = text.replace("\n", self.SENTINEL_LINE_BREAK)
    +1420            text = f"{self.QUOTE_START}{text}{self.QUOTE_END}"
    +1421        return text
    +1422
    +1423    def loaddata_sql(self, expression: exp.LoadData) -> str:
    +1424        local = " LOCAL" if expression.args.get("local") else ""
    +1425        inpath = f" INPATH {self.sql(expression, 'inpath')}"
    +1426        overwrite = " OVERWRITE" if expression.args.get("overwrite") else ""
    +1427        this = f" INTO TABLE {self.sql(expression, 'this')}"
    +1428        partition = self.sql(expression, "partition")
    +1429        partition = f" {partition}" if partition else ""
    +1430        input_format = self.sql(expression, "input_format")
    +1431        input_format = f" INPUTFORMAT {input_format}" if input_format else ""
    +1432        serde = self.sql(expression, "serde")
    +1433        serde = f" SERDE {serde}" if serde else ""
    +1434        return f"LOAD DATA{local}{inpath}{overwrite}{this}{partition}{input_format}{serde}"
    +1435
    +1436    def null_sql(self, *_) -> str:
    +1437        return "NULL"
    +1438
    +1439    def boolean_sql(self, expression: exp.Boolean) -> str:
    +1440        return "TRUE" if expression.this else "FALSE"
    +1441
    +1442    def order_sql(self, expression: exp.Order, flat: bool = False) -> str:
    +1443        this = self.sql(expression, "this")
    +1444        this = f"{this} " if this else this
    +1445        return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat)  # type: ignore
     1446
    -1447    def boolean_sql(self, expression: exp.Boolean) -> str:
    -1448        return "TRUE" if expression.this else "FALSE"
    +1447    def cluster_sql(self, expression: exp.Cluster) -> str:
    +1448        return self.op_expressions("CLUSTER BY", expression)
     1449
    -1450    def order_sql(self, expression: exp.Order, flat: bool = False) -> str:
    -1451        this = self.sql(expression, "this")
    -1452        this = f"{this} " if this else this
    -1453        return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat)  # type: ignore
    -1454
    -1455    def cluster_sql(self, expression: exp.Cluster) -> str:
    -1456        return self.op_expressions("CLUSTER BY", expression)
    -1457
    -1458    def distribute_sql(self, expression: exp.Distribute) -> str:
    -1459        return self.op_expressions("DISTRIBUTE BY", expression)
    -1460
    -1461    def sort_sql(self, expression: exp.Sort) -> str:
    -1462        return self.op_expressions("SORT BY", expression)
    -1463
    -1464    def ordered_sql(self, expression: exp.Ordered) -> str:
    -1465        desc = expression.args.get("desc")
    -1466        asc = not desc
    -1467
    -1468        nulls_first = expression.args.get("nulls_first")
    -1469        nulls_last = not nulls_first
    -1470        nulls_are_large = self.null_ordering == "nulls_are_large"
    -1471        nulls_are_small = self.null_ordering == "nulls_are_small"
    -1472        nulls_are_last = self.null_ordering == "nulls_are_last"
    -1473
    -1474        sort_order = " DESC" if desc else ""
    -1475        nulls_sort_change = ""
    -1476        if nulls_first and (
    -1477            (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last
    -1478        ):
    -1479            nulls_sort_change = " NULLS FIRST"
    -1480        elif (
    -1481            nulls_last
    -1482            and ((asc and nulls_are_small) or (desc and nulls_are_large))
    -1483            and not nulls_are_last
    -1484        ):
    -1485            nulls_sort_change = " NULLS LAST"
    +1450    def distribute_sql(self, expression: exp.Distribute) -> str:
    +1451        return self.op_expressions("DISTRIBUTE BY", expression)
    +1452
    +1453    def sort_sql(self, expression: exp.Sort) -> str:
    +1454        return self.op_expressions("SORT BY", expression)
    +1455
    +1456    def ordered_sql(self, expression: exp.Ordered) -> str:
    +1457        desc = expression.args.get("desc")
    +1458        asc = not desc
    +1459
    +1460        nulls_first = expression.args.get("nulls_first")
    +1461        nulls_last = not nulls_first
    +1462        nulls_are_large = self.NULL_ORDERING == "nulls_are_large"
    +1463        nulls_are_small = self.NULL_ORDERING == "nulls_are_small"
    +1464        nulls_are_last = self.NULL_ORDERING == "nulls_are_last"
    +1465
    +1466        sort_order = " DESC" if desc else ""
    +1467        nulls_sort_change = ""
    +1468        if nulls_first and (
    +1469            (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last
    +1470        ):
    +1471            nulls_sort_change = " NULLS FIRST"
    +1472        elif (
    +1473            nulls_last
    +1474            and ((asc and nulls_are_small) or (desc and nulls_are_large))
    +1475            and not nulls_are_last
    +1476        ):
    +1477            nulls_sort_change = " NULLS LAST"
    +1478
    +1479        if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED:
    +1480            self.unsupported(
    +1481                "Sorting in an ORDER BY on NULLS FIRST/NULLS LAST is not supported by this dialect"
    +1482            )
    +1483            nulls_sort_change = ""
    +1484
    +1485        return f"{self.sql(expression, 'this')}{sort_order}{nulls_sort_change}"
     1486
    -1487        if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED:
    -1488            self.unsupported(
    -1489                "Sorting in an ORDER BY on NULLS FIRST/NULLS LAST is not supported by this dialect"
    -1490            )
    -1491            nulls_sort_change = ""
    -1492
    -1493        return f"{self.sql(expression, 'this')}{sort_order}{nulls_sort_change}"
    -1494
    -1495    def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
    -1496        partition = self.partition_by_sql(expression)
    -1497        order = self.sql(expression, "order")
    -1498        measures = self.expressions(expression, key="measures")
    -1499        measures = self.seg(f"MEASURES{self.seg(measures)}") if measures else ""
    -1500        rows = self.sql(expression, "rows")
    -1501        rows = self.seg(rows) if rows else ""
    -1502        after = self.sql(expression, "after")
    -1503        after = self.seg(after) if after else ""
    -1504        pattern = self.sql(expression, "pattern")
    -1505        pattern = self.seg(f"PATTERN ({pattern})") if pattern else ""
    -1506        definition_sqls = [
    -1507            f"{self.sql(definition, 'alias')} AS {self.sql(definition, 'this')}"
    -1508            for definition in expression.args.get("define", [])
    -1509        ]
    -1510        definitions = self.expressions(sqls=definition_sqls)
    -1511        define = self.seg(f"DEFINE{self.seg(definitions)}") if definitions else ""
    -1512        body = "".join(
    -1513            (
    -1514                partition,
    -1515                order,
    -1516                measures,
    -1517                rows,
    -1518                after,
    -1519                pattern,
    -1520                define,
    -1521            )
    -1522        )
    -1523        alias = self.sql(expression, "alias")
    -1524        alias = f" {alias}" if alias else ""
    -1525        return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}{alias}"
    +1487    def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
    +1488        partition = self.partition_by_sql(expression)
    +1489        order = self.sql(expression, "order")
    +1490        measures = self.expressions(expression, key="measures")
    +1491        measures = self.seg(f"MEASURES{self.seg(measures)}") if measures else ""
    +1492        rows = self.sql(expression, "rows")
    +1493        rows = self.seg(rows) if rows else ""
    +1494        after = self.sql(expression, "after")
    +1495        after = self.seg(after) if after else ""
    +1496        pattern = self.sql(expression, "pattern")
    +1497        pattern = self.seg(f"PATTERN ({pattern})") if pattern else ""
    +1498        definition_sqls = [
    +1499            f"{self.sql(definition, 'alias')} AS {self.sql(definition, 'this')}"
    +1500            for definition in expression.args.get("define", [])
    +1501        ]
    +1502        definitions = self.expressions(sqls=definition_sqls)
    +1503        define = self.seg(f"DEFINE{self.seg(definitions)}") if definitions else ""
    +1504        body = "".join(
    +1505            (
    +1506                partition,
    +1507                order,
    +1508                measures,
    +1509                rows,
    +1510                after,
    +1511                pattern,
    +1512                define,
    +1513            )
    +1514        )
    +1515        alias = self.sql(expression, "alias")
    +1516        alias = f" {alias}" if alias else ""
    +1517        return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}{alias}"
    +1518
    +1519    def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
    +1520        limit: t.Optional[exp.Fetch | exp.Limit] = expression.args.get("limit")
    +1521
    +1522        if self.LIMIT_FETCH == "LIMIT" and isinstance(limit, exp.Fetch):
    +1523            limit = exp.Limit(expression=limit.args.get("count"))
    +1524        elif self.LIMIT_FETCH == "FETCH" and isinstance(limit, exp.Limit):
    +1525            limit = exp.Fetch(direction="FIRST", count=limit.expression)
     1526
    -1527    def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
    -1528        limit = expression.args.get("limit")
    -1529
    -1530        if self.LIMIT_FETCH == "LIMIT" and isinstance(limit, exp.Fetch):
    -1531            limit = exp.Limit(expression=limit.args.get("count"))
    -1532        elif self.LIMIT_FETCH == "FETCH" and isinstance(limit, exp.Limit):
    -1533            limit = exp.Fetch(direction="FIRST", count=limit.expression)
    -1534
    -1535        fetch = isinstance(limit, exp.Fetch)
    -1536
    -1537        return csv(
    -1538            *sqls,
    -1539            *[self.sql(join) for join in expression.args.get("joins") or []],
    -1540            self.sql(expression, "match"),
    -1541            *[self.sql(lateral) for lateral in expression.args.get("laterals") or []],
    -1542            self.sql(expression, "where"),
    -1543            self.sql(expression, "group"),
    -1544            self.sql(expression, "having"),
    -1545            *self.after_having_modifiers(expression),
    -1546            self.sql(expression, "order"),
    -1547            self.sql(expression, "offset") if fetch else self.sql(limit),
    -1548            self.sql(limit) if fetch else self.sql(expression, "offset"),
    -1549            *self.after_limit_modifiers(expression),
    -1550            sep="",
    -1551        )
    -1552
    -1553    def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
    -1554        return [
    -1555            self.sql(expression, "qualify"),
    -1556            self.seg("WINDOW ") + self.expressions(expression, key="windows", flat=True)
    -1557            if expression.args.get("windows")
    -1558            else "",
    -1559        ]
    -1560
    -1561    def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
    -1562        locks = self.expressions(expression, key="locks", sep=" ")
    -1563        locks = f" {locks}" if locks else ""
    -1564        return [locks, self.sql(expression, "sample")]
    -1565
    -1566    def select_sql(self, expression: exp.Select) -> str:
    -1567        hint = self.sql(expression, "hint")
    -1568        distinct = self.sql(expression, "distinct")
    -1569        distinct = f" {distinct}" if distinct else ""
    -1570        kind = expression.args.get("kind")
    -1571        kind = f" AS {kind}" if kind else ""
    -1572        expressions = self.expressions(expression)
    -1573        expressions = f"{self.sep()}{expressions}" if expressions else expressions
    -1574        sql = self.query_modifiers(
    -1575            expression,
    -1576            f"SELECT{hint}{distinct}{kind}{expressions}",
    -1577            self.sql(expression, "into", comment=False),
    -1578            self.sql(expression, "from", comment=False),
    -1579        )
    -1580        return self.prepend_ctes(expression, sql)
    -1581
    -1582    def schema_sql(self, expression: exp.Schema) -> str:
    -1583        this = self.sql(expression, "this")
    -1584        this = f"{this} " if this else ""
    -1585        sql = f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}"
    -1586        return f"{this}{sql}"
    -1587
    -1588    def star_sql(self, expression: exp.Star) -> str:
    -1589        except_ = self.expressions(expression, key="except", flat=True)
    -1590        except_ = f"{self.seg(self.STAR_MAPPING['except'])} ({except_})" if except_ else ""
    -1591        replace = self.expressions(expression, key="replace", flat=True)
    -1592        replace = f"{self.seg(self.STAR_MAPPING['replace'])} ({replace})" if replace else ""
    -1593        return f"*{except_}{replace}"
    -1594
    -1595    def parameter_sql(self, expression: exp.Parameter) -> str:
    -1596        this = self.sql(expression, "this")
    -1597        this = f"{{{this}}}" if expression.args.get("wrapped") else f"{this}"
    -1598        return f"{self.PARAMETER_TOKEN}{this}"
    -1599
    -1600    def sessionparameter_sql(self, expression: exp.SessionParameter) -> str:
    -1601        this = self.sql(expression, "this")
    -1602        kind = expression.text("kind")
    -1603        if kind:
    -1604            kind = f"{kind}."
    -1605        return f"@@{kind}{this}"
    -1606
    -1607    def placeholder_sql(self, expression: exp.Placeholder) -> str:
    -1608        return f":{expression.name}" if expression.name else "?"
    -1609
    -1610    def subquery_sql(self, expression: exp.Subquery, sep: str = " AS ") -> str:
    -1611        alias = self.sql(expression, "alias")
    -1612        alias = f"{sep}{alias}" if alias else ""
    -1613
    -1614        pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
    -1615        pivots = f" {pivots}" if pivots else ""
    -1616
    -1617        sql = self.query_modifiers(expression, self.wrap(expression), alias, pivots)
    -1618        return self.prepend_ctes(expression, sql)
    -1619
    -1620    def qualify_sql(self, expression: exp.Qualify) -> str:
    -1621        this = self.indent(self.sql(expression, "this"))
    -1622        return f"{self.seg('QUALIFY')}{self.sep()}{this}"
    -1623
    -1624    def union_sql(self, expression: exp.Union) -> str:
    -1625        return self.prepend_ctes(
    -1626            expression,
    -1627            self.set_operation(expression, self.union_op(expression)),
    -1628        )
    -1629
    -1630    def union_op(self, expression: exp.Union) -> str:
    -1631        kind = " DISTINCT" if self.EXPLICIT_UNION else ""
    -1632        kind = kind if expression.args.get("distinct") else " ALL"
    -1633        return f"UNION{kind}"
    -1634
    -1635    def unnest_sql(self, expression: exp.Unnest) -> str:
    -1636        args = self.expressions(expression, flat=True)
    -1637        alias = expression.args.get("alias")
    -1638        if alias and self.unnest_column_only:
    -1639            columns = alias.columns
    -1640            alias = self.sql(columns[0]) if columns else ""
    -1641        else:
    -1642            alias = self.sql(expression, "alias")
    -1643        alias = f" AS {alias}" if alias else alias
    -1644        ordinality = " WITH ORDINALITY" if expression.args.get("ordinality") else ""
    -1645        offset = expression.args.get("offset")
    -1646        offset = f" WITH OFFSET AS {self.sql(offset)}" if offset else ""
    -1647        return f"UNNEST({args}){ordinality}{alias}{offset}"
    -1648
    -1649    def where_sql(self, expression: exp.Where) -> str:
    -1650        this = self.indent(self.sql(expression, "this"))
    -1651        return f"{self.seg('WHERE')}{self.sep()}{this}"
    -1652
    -1653    def window_sql(self, expression: exp.Window) -> str:
    -1654        this = self.sql(expression, "this")
    -1655        partition = self.partition_by_sql(expression)
    -1656        order = expression.args.get("order")
    -1657        order = self.order_sql(order, flat=True) if order else ""
    -1658        spec = self.sql(expression, "spec")
    -1659        alias = self.sql(expression, "alias")
    -1660        over = self.sql(expression, "over") or "OVER"
    -1661
    -1662        this = f"{this} {'AS' if expression.arg_key == 'windows' else over}"
    +1527        fetch = isinstance(limit, exp.Fetch)
    +1528
    +1529        return csv(
    +1530            *sqls,
    +1531            *[self.sql(join) for join in expression.args.get("joins") or []],
    +1532            self.sql(expression, "match"),
    +1533            *[self.sql(lateral) for lateral in expression.args.get("laterals") or []],
    +1534            self.sql(expression, "where"),
    +1535            self.sql(expression, "group"),
    +1536            self.sql(expression, "having"),
    +1537            *self.after_having_modifiers(expression),
    +1538            self.sql(expression, "order"),
    +1539            *self.offset_limit_modifiers(expression, fetch, limit),
    +1540            *self.after_limit_modifiers(expression),
    +1541            sep="",
    +1542        )
    +1543
    +1544    def offset_limit_modifiers(
    +1545        self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
    +1546    ) -> t.List[str]:
    +1547        return [
    +1548            self.sql(expression, "offset") if fetch else self.sql(limit),
    +1549            self.sql(limit) if fetch else self.sql(expression, "offset"),
    +1550        ]
    +1551
    +1552    def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
    +1553        return [
    +1554            self.sql(expression, "qualify"),
    +1555            self.seg("WINDOW ") + self.expressions(expression, key="windows", flat=True)
    +1556            if expression.args.get("windows")
    +1557            else "",
    +1558        ]
    +1559
    +1560    def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
    +1561        locks = self.expressions(expression, key="locks", sep=" ")
    +1562        locks = f" {locks}" if locks else ""
    +1563        return [locks, self.sql(expression, "sample")]
    +1564
    +1565    def select_sql(self, expression: exp.Select) -> str:
    +1566        hint = self.sql(expression, "hint")
    +1567        distinct = self.sql(expression, "distinct")
    +1568        distinct = f" {distinct}" if distinct else ""
    +1569        kind = expression.args.get("kind")
    +1570        kind = f" AS {kind}" if kind else ""
    +1571        expressions = self.expressions(expression)
    +1572        expressions = f"{self.sep()}{expressions}" if expressions else expressions
    +1573        sql = self.query_modifiers(
    +1574            expression,
    +1575            f"SELECT{hint}{distinct}{kind}{expressions}",
    +1576            self.sql(expression, "into", comment=False),
    +1577            self.sql(expression, "from", comment=False),
    +1578        )
    +1579        return self.prepend_ctes(expression, sql)
    +1580
    +1581    def schema_sql(self, expression: exp.Schema) -> str:
    +1582        this = self.sql(expression, "this")
    +1583        this = f"{this} " if this else ""
    +1584        sql = self.schema_columns_sql(expression)
    +1585        return f"{this}{sql}"
    +1586
    +1587    def schema_columns_sql(self, expression: exp.Schema) -> str:
    +1588        return f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}"
    +1589
    +1590    def star_sql(self, expression: exp.Star) -> str:
    +1591        except_ = self.expressions(expression, key="except", flat=True)
    +1592        except_ = f"{self.seg(self.STAR_MAPPING['except'])} ({except_})" if except_ else ""
    +1593        replace = self.expressions(expression, key="replace", flat=True)
    +1594        replace = f"{self.seg(self.STAR_MAPPING['replace'])} ({replace})" if replace else ""
    +1595        return f"*{except_}{replace}"
    +1596
    +1597    def parameter_sql(self, expression: exp.Parameter) -> str:
    +1598        this = self.sql(expression, "this")
    +1599        this = f"{{{this}}}" if expression.args.get("wrapped") else f"{this}"
    +1600        return f"{self.PARAMETER_TOKEN}{this}"
    +1601
    +1602    def sessionparameter_sql(self, expression: exp.SessionParameter) -> str:
    +1603        this = self.sql(expression, "this")
    +1604        kind = expression.text("kind")
    +1605        if kind:
    +1606            kind = f"{kind}."
    +1607        return f"@@{kind}{this}"
    +1608
    +1609    def placeholder_sql(self, expression: exp.Placeholder) -> str:
    +1610        return f":{expression.name}" if expression.name else "?"
    +1611
    +1612    def subquery_sql(self, expression: exp.Subquery, sep: str = " AS ") -> str:
    +1613        alias = self.sql(expression, "alias")
    +1614        alias = f"{sep}{alias}" if alias else ""
    +1615
    +1616        pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
    +1617        pivots = f" {pivots}" if pivots else ""
    +1618
    +1619        sql = self.query_modifiers(expression, self.wrap(expression), alias, pivots)
    +1620        return self.prepend_ctes(expression, sql)
    +1621
    +1622    def qualify_sql(self, expression: exp.Qualify) -> str:
    +1623        this = self.indent(self.sql(expression, "this"))
    +1624        return f"{self.seg('QUALIFY')}{self.sep()}{this}"
    +1625
    +1626    def union_sql(self, expression: exp.Union) -> str:
    +1627        return self.prepend_ctes(
    +1628            expression,
    +1629            self.set_operation(expression, self.union_op(expression)),
    +1630        )
    +1631
    +1632    def union_op(self, expression: exp.Union) -> str:
    +1633        kind = " DISTINCT" if self.EXPLICIT_UNION else ""
    +1634        kind = kind if expression.args.get("distinct") else " ALL"
    +1635        return f"UNION{kind}"
    +1636
    +1637    def unnest_sql(self, expression: exp.Unnest) -> str:
    +1638        args = self.expressions(expression, flat=True)
    +1639        alias = expression.args.get("alias")
    +1640        if alias and self.UNNEST_COLUMN_ONLY:
    +1641            columns = alias.columns
    +1642            alias = self.sql(columns[0]) if columns else ""
    +1643        else:
    +1644            alias = self.sql(expression, "alias")
    +1645        alias = f" AS {alias}" if alias else alias
    +1646        ordinality = " WITH ORDINALITY" if expression.args.get("ordinality") else ""
    +1647        offset = expression.args.get("offset")
    +1648        offset = f" WITH OFFSET AS {self.sql(offset)}" if offset else ""
    +1649        return f"UNNEST({args}){ordinality}{alias}{offset}"
    +1650
    +1651    def where_sql(self, expression: exp.Where) -> str:
    +1652        this = self.indent(self.sql(expression, "this"))
    +1653        return f"{self.seg('WHERE')}{self.sep()}{this}"
    +1654
    +1655    def window_sql(self, expression: exp.Window) -> str:
    +1656        this = self.sql(expression, "this")
    +1657        partition = self.partition_by_sql(expression)
    +1658        order = expression.args.get("order")
    +1659        order = self.order_sql(order, flat=True) if order else ""
    +1660        spec = self.sql(expression, "spec")
    +1661        alias = self.sql(expression, "alias")
    +1662        over = self.sql(expression, "over") or "OVER"
     1663
    -1664        first = expression.args.get("first")
    -1665        if first is None:
    -1666            first = ""
    -1667        else:
    -1668            first = "FIRST" if first else "LAST"
    -1669
    -1670        if not partition and not order and not spec and alias:
    -1671            return f"{this} {alias}"
    -1672
    -1673        args = " ".join(arg for arg in (alias, first, partition, order, spec) if arg)
    -1674        return f"{this} ({args})"
    -1675
    -1676    def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str:
    -1677        partition = self.expressions(expression, key="partition_by", flat=True)
    -1678        return f"PARTITION BY {partition}" if partition else ""
    -1679
    -1680    def windowspec_sql(self, expression: exp.WindowSpec) -> str:
    -1681        kind = self.sql(expression, "kind")
    -1682        start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ")
    -1683        end = (
    -1684            csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ")
    -1685            or "CURRENT ROW"
    -1686        )
    -1687        return f"{kind} BETWEEN {start} AND {end}"
    -1688
    -1689    def withingroup_sql(self, expression: exp.WithinGroup) -> str:
    -1690        this = self.sql(expression, "this")
    -1691        expression_sql = self.sql(expression, "expression")[1:]  # order has a leading space
    -1692        return f"{this} WITHIN GROUP ({expression_sql})"
    -1693
    -1694    def between_sql(self, expression: exp.Between) -> str:
    -1695        this = self.sql(expression, "this")
    -1696        low = self.sql(expression, "low")
    -1697        high = self.sql(expression, "high")
    -1698        return f"{this} BETWEEN {low} AND {high}"
    -1699
    -1700    def bracket_sql(self, expression: exp.Bracket) -> str:
    -1701        expressions = apply_index_offset(expression.this, expression.expressions, self.index_offset)
    -1702        expressions_sql = ", ".join(self.sql(e) for e in expressions)
    -1703
    -1704        return f"{self.sql(expression, 'this')}[{expressions_sql}]"
    +1664        this = f"{this} {'AS' if expression.arg_key == 'windows' else over}"
    +1665
    +1666        first = expression.args.get("first")
    +1667        if first is None:
    +1668            first = ""
    +1669        else:
    +1670            first = "FIRST" if first else "LAST"
    +1671
    +1672        if not partition and not order and not spec and alias:
    +1673            return f"{this} {alias}"
    +1674
    +1675        args = " ".join(arg for arg in (alias, first, partition, order, spec) if arg)
    +1676        return f"{this} ({args})"
    +1677
    +1678    def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str:
    +1679        partition = self.expressions(expression, key="partition_by", flat=True)
    +1680        return f"PARTITION BY {partition}" if partition else ""
    +1681
    +1682    def windowspec_sql(self, expression: exp.WindowSpec) -> str:
    +1683        kind = self.sql(expression, "kind")
    +1684        start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ")
    +1685        end = (
    +1686            csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ")
    +1687            or "CURRENT ROW"
    +1688        )
    +1689        return f"{kind} BETWEEN {start} AND {end}"
    +1690
    +1691    def withingroup_sql(self, expression: exp.WithinGroup) -> str:
    +1692        this = self.sql(expression, "this")
    +1693        expression_sql = self.sql(expression, "expression")[1:]  # order has a leading space
    +1694        return f"{this} WITHIN GROUP ({expression_sql})"
    +1695
    +1696    def between_sql(self, expression: exp.Between) -> str:
    +1697        this = self.sql(expression, "this")
    +1698        low = self.sql(expression, "low")
    +1699        high = self.sql(expression, "high")
    +1700        return f"{this} BETWEEN {low} AND {high}"
    +1701
    +1702    def bracket_sql(self, expression: exp.Bracket) -> str:
    +1703        expressions = apply_index_offset(expression.this, expression.expressions, self.INDEX_OFFSET)
    +1704        expressions_sql = ", ".join(self.sql(e) for e in expressions)
     1705
    -1706    def all_sql(self, expression: exp.All) -> str:
    -1707        return f"ALL {self.wrap(expression)}"
    -1708
    -1709    def any_sql(self, expression: exp.Any) -> str:
    -1710        this = self.sql(expression, "this")
    -1711        if isinstance(expression.this, exp.Subqueryable):
    -1712            this = self.wrap(this)
    -1713        return f"ANY {this}"
    -1714
    -1715    def exists_sql(self, expression: exp.Exists) -> str:
    -1716        return f"EXISTS{self.wrap(expression)}"
    -1717
    -1718    def case_sql(self, expression: exp.Case) -> str:
    -1719        this = self.sql(expression, "this")
    -1720        statements = [f"CASE {this}" if this else "CASE"]
    -1721
    -1722        for e in expression.args["ifs"]:
    -1723            statements.append(f"WHEN {self.sql(e, 'this')}")
    -1724            statements.append(f"THEN {self.sql(e, 'true')}")
    -1725
    -1726        default = self.sql(expression, "default")
    +1706        return f"{self.sql(expression, 'this')}[{expressions_sql}]"
    +1707
    +1708    def all_sql(self, expression: exp.All) -> str:
    +1709        return f"ALL {self.wrap(expression)}"
    +1710
    +1711    def any_sql(self, expression: exp.Any) -> str:
    +1712        this = self.sql(expression, "this")
    +1713        if isinstance(expression.this, exp.Subqueryable):
    +1714            this = self.wrap(this)
    +1715        return f"ANY {this}"
    +1716
    +1717    def exists_sql(self, expression: exp.Exists) -> str:
    +1718        return f"EXISTS{self.wrap(expression)}"
    +1719
    +1720    def case_sql(self, expression: exp.Case) -> str:
    +1721        this = self.sql(expression, "this")
    +1722        statements = [f"CASE {this}" if this else "CASE"]
    +1723
    +1724        for e in expression.args["ifs"]:
    +1725            statements.append(f"WHEN {self.sql(e, 'this')}")
    +1726            statements.append(f"THEN {self.sql(e, 'true')}")
     1727
    -1728        if default:
    -1729            statements.append(f"ELSE {default}")
    -1730
    -1731        statements.append("END")
    +1728        default = self.sql(expression, "default")
    +1729
    +1730        if default:
    +1731            statements.append(f"ELSE {default}")
     1732
    -1733        if self.pretty and self.text_width(statements) > self._max_text_width:
    -1734            return self.indent("\n".join(statements), skip_first=True, skip_last=True)
    -1735
    -1736        return " ".join(statements)
    +1733        statements.append("END")
    +1734
    +1735        if self.pretty and self.text_width(statements) > self.max_text_width:
    +1736            return self.indent("\n".join(statements), skip_first=True, skip_last=True)
     1737
    -1738    def constraint_sql(self, expression: exp.Constraint) -> str:
    -1739        this = self.sql(expression, "this")
    -1740        expressions = self.expressions(expression, flat=True)
    -1741        return f"CONSTRAINT {this} {expressions}"
    -1742
    -1743    def nextvaluefor_sql(self, expression: exp.NextValueFor) -> str:
    -1744        order = expression.args.get("order")
    -1745        order = f" OVER ({self.order_sql(order, flat=True)})" if order else ""
    -1746        return f"NEXT VALUE FOR {self.sql(expression, 'this')}{order}"
    -1747
    -1748    def extract_sql(self, expression: exp.Extract) -> str:
    -1749        this = self.sql(expression, "this")
    -1750        expression_sql = self.sql(expression, "expression")
    -1751        return f"EXTRACT({this} FROM {expression_sql})"
    -1752
    -1753    def trim_sql(self, expression: exp.Trim) -> str:
    -1754        trim_type = self.sql(expression, "position")
    -1755
    -1756        if trim_type == "LEADING":
    -1757            return self.func("LTRIM", expression.this)
    -1758        elif trim_type == "TRAILING":
    -1759            return self.func("RTRIM", expression.this)
    -1760        else:
    -1761            return self.func("TRIM", expression.this, expression.expression)
    -1762
    -1763    def concat_sql(self, expression: exp.Concat) -> str:
    -1764        if len(expression.expressions) == 1:
    -1765            return self.sql(expression.expressions[0])
    -1766        return self.function_fallback_sql(expression)
    -1767
    -1768    def check_sql(self, expression: exp.Check) -> str:
    -1769        this = self.sql(expression, key="this")
    -1770        return f"CHECK ({this})"
    -1771
    -1772    def foreignkey_sql(self, expression: exp.ForeignKey) -> str:
    -1773        expressions = self.expressions(expression, flat=True)
    -1774        reference = self.sql(expression, "reference")
    -1775        reference = f" {reference}" if reference else ""
    -1776        delete = self.sql(expression, "delete")
    -1777        delete = f" ON DELETE {delete}" if delete else ""
    -1778        update = self.sql(expression, "update")
    -1779        update = f" ON UPDATE {update}" if update else ""
    -1780        return f"FOREIGN KEY ({expressions}){reference}{delete}{update}"
    -1781
    -1782    def primarykey_sql(self, expression: exp.ForeignKey) -> str:
    -1783        expressions = self.expressions(expression, flat=True)
    -1784        options = self.expressions(expression, key="options", flat=True, sep=" ")
    -1785        options = f" {options}" if options else ""
    -1786        return f"PRIMARY KEY ({expressions}){options}"
    -1787
    -1788    def if_sql(self, expression: exp.If) -> str:
    -1789        return self.case_sql(
    -1790            exp.Case(ifs=[expression.copy()], default=expression.args.get("false"))
    -1791        )
    -1792
    -1793    def matchagainst_sql(self, expression: exp.MatchAgainst) -> str:
    -1794        modifier = expression.args.get("modifier")
    -1795        modifier = f" {modifier}" if modifier else ""
    -1796        return f"{self.func('MATCH', *expression.expressions)} AGAINST({self.sql(expression, 'this')}{modifier})"
    -1797
    -1798    def jsonkeyvalue_sql(self, expression: exp.JSONKeyValue) -> str:
    -1799        return f"{self.sql(expression, 'this')}: {self.sql(expression, 'expression')}"
    -1800
    -1801    def jsonobject_sql(self, expression: exp.JSONObject) -> str:
    -1802        expressions = self.expressions(expression)
    +1738        return " ".join(statements)
    +1739
    +1740    def constraint_sql(self, expression: exp.Constraint) -> str:
    +1741        this = self.sql(expression, "this")
    +1742        expressions = self.expressions(expression, flat=True)
    +1743        return f"CONSTRAINT {this} {expressions}"
    +1744
    +1745    def nextvaluefor_sql(self, expression: exp.NextValueFor) -> str:
    +1746        order = expression.args.get("order")
    +1747        order = f" OVER ({self.order_sql(order, flat=True)})" if order else ""
    +1748        return f"NEXT VALUE FOR {self.sql(expression, 'this')}{order}"
    +1749
    +1750    def extract_sql(self, expression: exp.Extract) -> str:
    +1751        this = self.sql(expression, "this")
    +1752        expression_sql = self.sql(expression, "expression")
    +1753        return f"EXTRACT({this} FROM {expression_sql})"
    +1754
    +1755    def trim_sql(self, expression: exp.Trim) -> str:
    +1756        trim_type = self.sql(expression, "position")
    +1757
    +1758        if trim_type == "LEADING":
    +1759            return self.func("LTRIM", expression.this)
    +1760        elif trim_type == "TRAILING":
    +1761            return self.func("RTRIM", expression.this)
    +1762        else:
    +1763            return self.func("TRIM", expression.this, expression.expression)
    +1764
    +1765    def safeconcat_sql(self, expression: exp.SafeConcat) -> str:
    +1766        expressions = expression.expressions
    +1767        if self.STRICT_STRING_CONCAT:
    +1768            expressions = (exp.cast(e, "text") for e in expressions)
    +1769        return self.func("CONCAT", *expressions)
    +1770
    +1771    def check_sql(self, expression: exp.Check) -> str:
    +1772        this = self.sql(expression, key="this")
    +1773        return f"CHECK ({this})"
    +1774
    +1775    def foreignkey_sql(self, expression: exp.ForeignKey) -> str:
    +1776        expressions = self.expressions(expression, flat=True)
    +1777        reference = self.sql(expression, "reference")
    +1778        reference = f" {reference}" if reference else ""
    +1779        delete = self.sql(expression, "delete")
    +1780        delete = f" ON DELETE {delete}" if delete else ""
    +1781        update = self.sql(expression, "update")
    +1782        update = f" ON UPDATE {update}" if update else ""
    +1783        return f"FOREIGN KEY ({expressions}){reference}{delete}{update}"
    +1784
    +1785    def primarykey_sql(self, expression: exp.ForeignKey) -> str:
    +1786        expressions = self.expressions(expression, flat=True)
    +1787        options = self.expressions(expression, key="options", flat=True, sep=" ")
    +1788        options = f" {options}" if options else ""
    +1789        return f"PRIMARY KEY ({expressions}){options}"
    +1790
    +1791    def if_sql(self, expression: exp.If) -> str:
    +1792        return self.case_sql(exp.Case(ifs=[expression], default=expression.args.get("false")))
    +1793
    +1794    def matchagainst_sql(self, expression: exp.MatchAgainst) -> str:
    +1795        modifier = expression.args.get("modifier")
    +1796        modifier = f" {modifier}" if modifier else ""
    +1797        return f"{self.func('MATCH', *expression.expressions)} AGAINST({self.sql(expression, 'this')}{modifier})"
    +1798
    +1799    def jsonkeyvalue_sql(self, expression: exp.JSONKeyValue) -> str:
    +1800        return f"{self.sql(expression, 'this')}: {self.sql(expression, 'expression')}"
    +1801
    +1802    def jsonobject_sql(self, expression: exp.JSONObject) -> str:
     1803        null_handling = expression.args.get("null_handling")
     1804        null_handling = f" {null_handling}" if null_handling else ""
     1805        unique_keys = expression.args.get("unique_keys")
    @@ -4928,580 +4967,589 @@
     1812        format_json = " FORMAT JSON" if expression.args.get("format_json") else ""
     1813        encoding = self.sql(expression, "encoding")
     1814        encoding = f" ENCODING {encoding}" if encoding else ""
    -1815        return f"JSON_OBJECT({expressions}{null_handling}{unique_keys}{return_type}{format_json}{encoding})"
    -1816
    -1817    def openjsoncolumndef_sql(self, expression: exp.OpenJSONColumnDef) -> str:
    -1818        this = self.sql(expression, "this")
    -1819        kind = self.sql(expression, "kind")
    -1820        path = self.sql(expression, "path")
    -1821        path = f" {path}" if path else ""
    -1822        as_json = " AS JSON" if expression.args.get("as_json") else ""
    -1823        return f"{this} {kind}{path}{as_json}"
    -1824
    -1825    def openjson_sql(self, expression: exp.OpenJSON) -> str:
    -1826        this = self.sql(expression, "this")
    -1827        path = self.sql(expression, "path")
    -1828        path = f", {path}" if path else ""
    -1829        expressions = self.expressions(expression)
    -1830        with_ = (
    -1831            f" WITH ({self.seg(self.indent(expressions), sep='')}{self.seg(')', sep='')}"
    -1832            if expressions
    -1833            else ""
    -1834        )
    -1835        return f"OPENJSON({this}{path}){with_}"
    -1836
    -1837    def in_sql(self, expression: exp.In) -> str:
    -1838        query = expression.args.get("query")
    -1839        unnest = expression.args.get("unnest")
    -1840        field = expression.args.get("field")
    -1841        is_global = " GLOBAL" if expression.args.get("is_global") else ""
    -1842
    -1843        if query:
    -1844            in_sql = self.wrap(query)
    -1845        elif unnest:
    -1846            in_sql = self.in_unnest_op(unnest)
    -1847        elif field:
    -1848            in_sql = self.sql(field)
    -1849        else:
    -1850            in_sql = f"({self.expressions(expression, flat=True)})"
    -1851
    -1852        return f"{self.sql(expression, 'this')}{is_global} IN {in_sql}"
    -1853
    -1854    def in_unnest_op(self, unnest: exp.Unnest) -> str:
    -1855        return f"(SELECT {self.sql(unnest)})"
    -1856
    -1857    def interval_sql(self, expression: exp.Interval) -> str:
    -1858        unit = self.sql(expression, "unit")
    -1859        if not self.INTERVAL_ALLOWS_PLURAL_FORM:
    -1860            unit = self.TIME_PART_SINGULARS.get(unit.lower(), unit)
    -1861        unit = f" {unit}" if unit else ""
    -1862
    -1863        if self.SINGLE_STRING_INTERVAL:
    -1864            this = expression.this.name if expression.this else ""
    -1865            return f"INTERVAL '{this}{unit}'" if this else f"INTERVAL{unit}"
    +1815        return self.func(
    +1816            "JSON_OBJECT",
    +1817            *expression.expressions,
    +1818            suffix=f"{null_handling}{unique_keys}{return_type}{format_json}{encoding})",
    +1819        )
    +1820
    +1821    def openjsoncolumndef_sql(self, expression: exp.OpenJSONColumnDef) -> str:
    +1822        this = self.sql(expression, "this")
    +1823        kind = self.sql(expression, "kind")
    +1824        path = self.sql(expression, "path")
    +1825        path = f" {path}" if path else ""
    +1826        as_json = " AS JSON" if expression.args.get("as_json") else ""
    +1827        return f"{this} {kind}{path}{as_json}"
    +1828
    +1829    def openjson_sql(self, expression: exp.OpenJSON) -> str:
    +1830        this = self.sql(expression, "this")
    +1831        path = self.sql(expression, "path")
    +1832        path = f", {path}" if path else ""
    +1833        expressions = self.expressions(expression)
    +1834        with_ = (
    +1835            f" WITH ({self.seg(self.indent(expressions), sep='')}{self.seg(')', sep='')}"
    +1836            if expressions
    +1837            else ""
    +1838        )
    +1839        return f"OPENJSON({this}{path}){with_}"
    +1840
    +1841    def in_sql(self, expression: exp.In) -> str:
    +1842        query = expression.args.get("query")
    +1843        unnest = expression.args.get("unnest")
    +1844        field = expression.args.get("field")
    +1845        is_global = " GLOBAL" if expression.args.get("is_global") else ""
    +1846
    +1847        if query:
    +1848            in_sql = self.wrap(query)
    +1849        elif unnest:
    +1850            in_sql = self.in_unnest_op(unnest)
    +1851        elif field:
    +1852            in_sql = self.sql(field)
    +1853        else:
    +1854            in_sql = f"({self.expressions(expression, flat=True)})"
    +1855
    +1856        return f"{self.sql(expression, 'this')}{is_global} IN {in_sql}"
    +1857
    +1858    def in_unnest_op(self, unnest: exp.Unnest) -> str:
    +1859        return f"(SELECT {self.sql(unnest)})"
    +1860
    +1861    def interval_sql(self, expression: exp.Interval) -> str:
    +1862        unit = self.sql(expression, "unit")
    +1863        if not self.INTERVAL_ALLOWS_PLURAL_FORM:
    +1864            unit = self.TIME_PART_SINGULARS.get(unit.lower(), unit)
    +1865        unit = f" {unit}" if unit else ""
     1866
    -1867        this = self.sql(expression, "this")
    -1868        if this:
    -1869            unwrapped = isinstance(expression.this, self.UNWRAPPED_INTERVAL_VALUES)
    -1870            this = f" {this}" if unwrapped else f" ({this})"
    -1871
    -1872        return f"INTERVAL{this}{unit}"
    -1873
    -1874    def return_sql(self, expression: exp.Return) -> str:
    -1875        return f"RETURN {self.sql(expression, 'this')}"
    -1876
    -1877    def reference_sql(self, expression: exp.Reference) -> str:
    -1878        this = self.sql(expression, "this")
    -1879        expressions = self.expressions(expression, flat=True)
    -1880        expressions = f"({expressions})" if expressions else ""
    -1881        options = self.expressions(expression, key="options", flat=True, sep=" ")
    -1882        options = f" {options}" if options else ""
    -1883        return f"REFERENCES {this}{expressions}{options}"
    -1884
    -1885    def anonymous_sql(self, expression: exp.Anonymous) -> str:
    -1886        return self.func(expression.name, *expression.expressions)
    -1887
    -1888    def paren_sql(self, expression: exp.Paren) -> str:
    -1889        if isinstance(expression.unnest(), exp.Select):
    -1890            sql = self.wrap(expression)
    -1891        else:
    -1892            sql = self.seg(self.indent(self.sql(expression, "this")), sep="")
    -1893            sql = f"({sql}{self.seg(')', sep='')}"
    -1894
    -1895        return self.prepend_ctes(expression, sql)
    -1896
    -1897    def neg_sql(self, expression: exp.Neg) -> str:
    -1898        # This makes sure we don't convert "- - 5" to "--5", which is a comment
    -1899        this_sql = self.sql(expression, "this")
    -1900        sep = " " if this_sql[0] == "-" else ""
    -1901        return f"-{sep}{this_sql}"
    -1902
    -1903    def not_sql(self, expression: exp.Not) -> str:
    -1904        return f"NOT {self.sql(expression, 'this')}"
    -1905
    -1906    def alias_sql(self, expression: exp.Alias) -> str:
    -1907        alias = self.sql(expression, "alias")
    -1908        alias = f" AS {alias}" if alias else ""
    -1909        return f"{self.sql(expression, 'this')}{alias}"
    -1910
    -1911    def aliases_sql(self, expression: exp.Aliases) -> str:
    -1912        return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
    -1913
    -1914    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    -1915        this = self.sql(expression, "this")
    -1916        zone = self.sql(expression, "zone")
    -1917        return f"{this} AT TIME ZONE {zone}"
    -1918
    -1919    def add_sql(self, expression: exp.Add) -> str:
    -1920        return self.binary(expression, "+")
    -1921
    -1922    def and_sql(self, expression: exp.And) -> str:
    -1923        return self.connector_sql(expression, "AND")
    -1924
    -1925    def connector_sql(self, expression: exp.Connector, op: str) -> str:
    -1926        if not self.pretty:
    -1927            return self.binary(expression, op)
    +1867        if self.SINGLE_STRING_INTERVAL:
    +1868            this = expression.this.name if expression.this else ""
    +1869            return f"INTERVAL '{this}{unit}'" if this else f"INTERVAL{unit}"
    +1870
    +1871        this = self.sql(expression, "this")
    +1872        if this:
    +1873            unwrapped = isinstance(expression.this, self.UNWRAPPED_INTERVAL_VALUES)
    +1874            this = f" {this}" if unwrapped else f" ({this})"
    +1875
    +1876        return f"INTERVAL{this}{unit}"
    +1877
    +1878    def return_sql(self, expression: exp.Return) -> str:
    +1879        return f"RETURN {self.sql(expression, 'this')}"
    +1880
    +1881    def reference_sql(self, expression: exp.Reference) -> str:
    +1882        this = self.sql(expression, "this")
    +1883        expressions = self.expressions(expression, flat=True)
    +1884        expressions = f"({expressions})" if expressions else ""
    +1885        options = self.expressions(expression, key="options", flat=True, sep=" ")
    +1886        options = f" {options}" if options else ""
    +1887        return f"REFERENCES {this}{expressions}{options}"
    +1888
    +1889    def anonymous_sql(self, expression: exp.Anonymous) -> str:
    +1890        return self.func(expression.name, *expression.expressions)
    +1891
    +1892    def paren_sql(self, expression: exp.Paren) -> str:
    +1893        if isinstance(expression.unnest(), exp.Select):
    +1894            sql = self.wrap(expression)
    +1895        else:
    +1896            sql = self.seg(self.indent(self.sql(expression, "this")), sep="")
    +1897            sql = f"({sql}{self.seg(')', sep='')}"
    +1898
    +1899        return self.prepend_ctes(expression, sql)
    +1900
    +1901    def neg_sql(self, expression: exp.Neg) -> str:
    +1902        # This makes sure we don't convert "- - 5" to "--5", which is a comment
    +1903        this_sql = self.sql(expression, "this")
    +1904        sep = " " if this_sql[0] == "-" else ""
    +1905        return f"-{sep}{this_sql}"
    +1906
    +1907    def not_sql(self, expression: exp.Not) -> str:
    +1908        return f"NOT {self.sql(expression, 'this')}"
    +1909
    +1910    def alias_sql(self, expression: exp.Alias) -> str:
    +1911        alias = self.sql(expression, "alias")
    +1912        alias = f" AS {alias}" if alias else ""
    +1913        return f"{self.sql(expression, 'this')}{alias}"
    +1914
    +1915    def aliases_sql(self, expression: exp.Aliases) -> str:
    +1916        return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
    +1917
    +1918    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    +1919        this = self.sql(expression, "this")
    +1920        zone = self.sql(expression, "zone")
    +1921        return f"{this} AT TIME ZONE {zone}"
    +1922
    +1923    def add_sql(self, expression: exp.Add) -> str:
    +1924        return self.binary(expression, "+")
    +1925
    +1926    def and_sql(self, expression: exp.And) -> str:
    +1927        return self.connector_sql(expression, "AND")
     1928
    -1929        sqls = tuple(
    -1930            self.maybe_comment(self.sql(e), e, e.parent.comments or []) if i != 1 else self.sql(e)
    -1931            for i, e in enumerate(expression.flatten(unnest=False))
    -1932        )
    -1933
    -1934        sep = "\n" if self.text_width(sqls) > self._max_text_width else " "
    -1935        return f"{sep}{op} ".join(sqls)
    -1936
    -1937    def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str:
    -1938        return self.binary(expression, "&")
    -1939
    -1940    def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str:
    -1941        return self.binary(expression, "<<")
    -1942
    -1943    def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str:
    -1944        return f"~{self.sql(expression, 'this')}"
    -1945
    -1946    def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str:
    -1947        return self.binary(expression, "|")
    -1948
    -1949    def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str:
    -1950        return self.binary(expression, ">>")
    -1951
    -1952    def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str:
    -1953        return self.binary(expression, "^")
    -1954
    -1955    def cast_sql(self, expression: exp.Cast) -> str:
    -1956        return f"CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
    -1957
    -1958    def currentdate_sql(self, expression: exp.CurrentDate) -> str:
    -1959        zone = self.sql(expression, "this")
    -1960        return f"CURRENT_DATE({zone})" if zone else "CURRENT_DATE"
    +1929    def connector_sql(self, expression: exp.Connector, op: str) -> str:
    +1930        if not self.pretty:
    +1931            return self.binary(expression, op)
    +1932
    +1933        sqls = tuple(
    +1934            self.maybe_comment(self.sql(e), e, e.parent.comments or []) if i != 1 else self.sql(e)
    +1935            for i, e in enumerate(expression.flatten(unnest=False))
    +1936        )
    +1937
    +1938        sep = "\n" if self.text_width(sqls) > self.max_text_width else " "
    +1939        return f"{sep}{op} ".join(sqls)
    +1940
    +1941    def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str:
    +1942        return self.binary(expression, "&")
    +1943
    +1944    def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str:
    +1945        return self.binary(expression, "<<")
    +1946
    +1947    def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str:
    +1948        return f"~{self.sql(expression, 'this')}"
    +1949
    +1950    def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str:
    +1951        return self.binary(expression, "|")
    +1952
    +1953    def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str:
    +1954        return self.binary(expression, ">>")
    +1955
    +1956    def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str:
    +1957        return self.binary(expression, "^")
    +1958
    +1959    def cast_sql(self, expression: exp.Cast) -> str:
    +1960        return f"CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
     1961
    -1962    def collate_sql(self, expression: exp.Collate) -> str:
    -1963        return self.binary(expression, "COLLATE")
    -1964
    -1965    def command_sql(self, expression: exp.Command) -> str:
    -1966        return f"{self.sql(expression, 'this').upper()} {expression.text('expression').strip()}"
    -1967
    -1968    def comment_sql(self, expression: exp.Comment) -> str:
    -1969        this = self.sql(expression, "this")
    -1970        kind = expression.args["kind"]
    -1971        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
    -1972        expression_sql = self.sql(expression, "expression")
    -1973        return f"COMMENT{exists_sql}ON {kind} {this} IS {expression_sql}"
    -1974
    -1975    def mergetreettlaction_sql(self, expression: exp.MergeTreeTTLAction) -> str:
    -1976        this = self.sql(expression, "this")
    -1977        delete = " DELETE" if expression.args.get("delete") else ""
    -1978        recompress = self.sql(expression, "recompress")
    -1979        recompress = f" RECOMPRESS {recompress}" if recompress else ""
    -1980        to_disk = self.sql(expression, "to_disk")
    -1981        to_disk = f" TO DISK {to_disk}" if to_disk else ""
    -1982        to_volume = self.sql(expression, "to_volume")
    -1983        to_volume = f" TO VOLUME {to_volume}" if to_volume else ""
    -1984        return f"{this}{delete}{recompress}{to_disk}{to_volume}"
    -1985
    -1986    def mergetreettl_sql(self, expression: exp.MergeTreeTTL) -> str:
    -1987        where = self.sql(expression, "where")
    -1988        group = self.sql(expression, "group")
    -1989        aggregates = self.expressions(expression, key="aggregates")
    -1990        aggregates = self.seg("SET") + self.seg(aggregates) if aggregates else ""
    -1991
    -1992        if not (where or group or aggregates) and len(expression.expressions) == 1:
    -1993            return f"TTL {self.expressions(expression, flat=True)}"
    -1994
    -1995        return f"TTL{self.seg(self.expressions(expression))}{where}{group}{aggregates}"
    -1996
    -1997    def transaction_sql(self, expression: exp.Transaction) -> str:
    -1998        return "BEGIN"
    -1999
    -2000    def commit_sql(self, expression: exp.Commit) -> str:
    -2001        chain = expression.args.get("chain")
    -2002        if chain is not None:
    -2003            chain = " AND CHAIN" if chain else " AND NO CHAIN"
    -2004
    -2005        return f"COMMIT{chain or ''}"
    -2006
    -2007    def rollback_sql(self, expression: exp.Rollback) -> str:
    -2008        savepoint = expression.args.get("savepoint")
    -2009        savepoint = f" TO {savepoint}" if savepoint else ""
    -2010        return f"ROLLBACK{savepoint}"
    -2011
    -2012    def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
    -2013        this = self.sql(expression, "this")
    -2014
    -2015        dtype = self.sql(expression, "dtype")
    -2016        if dtype:
    -2017            collate = self.sql(expression, "collate")
    -2018            collate = f" COLLATE {collate}" if collate else ""
    -2019            using = self.sql(expression, "using")
    -2020            using = f" USING {using}" if using else ""
    -2021            return f"ALTER COLUMN {this} TYPE {dtype}{collate}{using}"
    -2022
    -2023        default = self.sql(expression, "default")
    -2024        if default:
    -2025            return f"ALTER COLUMN {this} SET DEFAULT {default}"
    +1962    def currentdate_sql(self, expression: exp.CurrentDate) -> str:
    +1963        zone = self.sql(expression, "this")
    +1964        return f"CURRENT_DATE({zone})" if zone else "CURRENT_DATE"
    +1965
    +1966    def collate_sql(self, expression: exp.Collate) -> str:
    +1967        return self.binary(expression, "COLLATE")
    +1968
    +1969    def command_sql(self, expression: exp.Command) -> str:
    +1970        return f"{self.sql(expression, 'this').upper()} {expression.text('expression').strip()}"
    +1971
    +1972    def comment_sql(self, expression: exp.Comment) -> str:
    +1973        this = self.sql(expression, "this")
    +1974        kind = expression.args["kind"]
    +1975        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
    +1976        expression_sql = self.sql(expression, "expression")
    +1977        return f"COMMENT{exists_sql}ON {kind} {this} IS {expression_sql}"
    +1978
    +1979    def mergetreettlaction_sql(self, expression: exp.MergeTreeTTLAction) -> str:
    +1980        this = self.sql(expression, "this")
    +1981        delete = " DELETE" if expression.args.get("delete") else ""
    +1982        recompress = self.sql(expression, "recompress")
    +1983        recompress = f" RECOMPRESS {recompress}" if recompress else ""
    +1984        to_disk = self.sql(expression, "to_disk")
    +1985        to_disk = f" TO DISK {to_disk}" if to_disk else ""
    +1986        to_volume = self.sql(expression, "to_volume")
    +1987        to_volume = f" TO VOLUME {to_volume}" if to_volume else ""
    +1988        return f"{this}{delete}{recompress}{to_disk}{to_volume}"
    +1989
    +1990    def mergetreettl_sql(self, expression: exp.MergeTreeTTL) -> str:
    +1991        where = self.sql(expression, "where")
    +1992        group = self.sql(expression, "group")
    +1993        aggregates = self.expressions(expression, key="aggregates")
    +1994        aggregates = self.seg("SET") + self.seg(aggregates) if aggregates else ""
    +1995
    +1996        if not (where or group or aggregates) and len(expression.expressions) == 1:
    +1997            return f"TTL {self.expressions(expression, flat=True)}"
    +1998
    +1999        return f"TTL{self.seg(self.expressions(expression))}{where}{group}{aggregates}"
    +2000
    +2001    def transaction_sql(self, expression: exp.Transaction) -> str:
    +2002        return "BEGIN"
    +2003
    +2004    def commit_sql(self, expression: exp.Commit) -> str:
    +2005        chain = expression.args.get("chain")
    +2006        if chain is not None:
    +2007            chain = " AND CHAIN" if chain else " AND NO CHAIN"
    +2008
    +2009        return f"COMMIT{chain or ''}"
    +2010
    +2011    def rollback_sql(self, expression: exp.Rollback) -> str:
    +2012        savepoint = expression.args.get("savepoint")
    +2013        savepoint = f" TO {savepoint}" if savepoint else ""
    +2014        return f"ROLLBACK{savepoint}"
    +2015
    +2016    def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
    +2017        this = self.sql(expression, "this")
    +2018
    +2019        dtype = self.sql(expression, "dtype")
    +2020        if dtype:
    +2021            collate = self.sql(expression, "collate")
    +2022            collate = f" COLLATE {collate}" if collate else ""
    +2023            using = self.sql(expression, "using")
    +2024            using = f" USING {using}" if using else ""
    +2025            return f"ALTER COLUMN {this} TYPE {dtype}{collate}{using}"
     2026
    -2027        if not expression.args.get("drop"):
    -2028            self.unsupported("Unsupported ALTER COLUMN syntax")
    -2029
    -2030        return f"ALTER COLUMN {this} DROP DEFAULT"
    -2031
    -2032    def renametable_sql(self, expression: exp.RenameTable) -> str:
    -2033        if not self.RENAME_TABLE_WITH_DB:
    -2034            # Remove db from tables
    -2035            expression = expression.transform(
    -2036                lambda n: exp.table_(n.this) if isinstance(n, exp.Table) else n
    -2037            )
    -2038        this = self.sql(expression, "this")
    -2039        return f"RENAME TO {this}"
    -2040
    -2041    def altertable_sql(self, expression: exp.AlterTable) -> str:
    -2042        actions = expression.args["actions"]
    -2043
    -2044        if isinstance(actions[0], exp.ColumnDef):
    -2045            actions = self.expressions(expression, key="actions", prefix="ADD COLUMN ")
    -2046        elif isinstance(actions[0], exp.Schema):
    -2047            actions = self.expressions(expression, key="actions", prefix="ADD COLUMNS ")
    -2048        elif isinstance(actions[0], exp.Delete):
    -2049            actions = self.expressions(expression, key="actions", flat=True)
    -2050        else:
    -2051            actions = self.expressions(expression, key="actions")
    -2052
    -2053        exists = " IF EXISTS" if expression.args.get("exists") else ""
    -2054        return f"ALTER TABLE{exists} {self.sql(expression, 'this')} {actions}"
    -2055
    -2056    def droppartition_sql(self, expression: exp.DropPartition) -> str:
    -2057        expressions = self.expressions(expression)
    -2058        exists = " IF EXISTS " if expression.args.get("exists") else " "
    -2059        return f"DROP{exists}{expressions}"
    -2060
    -2061    def addconstraint_sql(self, expression: exp.AddConstraint) -> str:
    -2062        this = self.sql(expression, "this")
    -2063        expression_ = self.sql(expression, "expression")
    -2064        add_constraint = f"ADD CONSTRAINT {this}" if this else "ADD"
    -2065
    -2066        enforced = expression.args.get("enforced")
    -2067        if enforced is not None:
    -2068            return f"{add_constraint} CHECK ({expression_}){' ENFORCED' if enforced else ''}"
    +2027        default = self.sql(expression, "default")
    +2028        if default:
    +2029            return f"ALTER COLUMN {this} SET DEFAULT {default}"
    +2030
    +2031        if not expression.args.get("drop"):
    +2032            self.unsupported("Unsupported ALTER COLUMN syntax")
    +2033
    +2034        return f"ALTER COLUMN {this} DROP DEFAULT"
    +2035
    +2036    def renametable_sql(self, expression: exp.RenameTable) -> str:
    +2037        if not self.RENAME_TABLE_WITH_DB:
    +2038            # Remove db from tables
    +2039            expression = expression.transform(
    +2040                lambda n: exp.table_(n.this) if isinstance(n, exp.Table) else n
    +2041            )
    +2042        this = self.sql(expression, "this")
    +2043        return f"RENAME TO {this}"
    +2044
    +2045    def altertable_sql(self, expression: exp.AlterTable) -> str:
    +2046        actions = expression.args["actions"]
    +2047
    +2048        if isinstance(actions[0], exp.ColumnDef):
    +2049            actions = self.expressions(expression, key="actions", prefix="ADD COLUMN ")
    +2050        elif isinstance(actions[0], exp.Schema):
    +2051            actions = self.expressions(expression, key="actions", prefix="ADD COLUMNS ")
    +2052        elif isinstance(actions[0], exp.Delete):
    +2053            actions = self.expressions(expression, key="actions", flat=True)
    +2054        else:
    +2055            actions = self.expressions(expression, key="actions")
    +2056
    +2057        exists = " IF EXISTS" if expression.args.get("exists") else ""
    +2058        return f"ALTER TABLE{exists} {self.sql(expression, 'this')} {actions}"
    +2059
    +2060    def droppartition_sql(self, expression: exp.DropPartition) -> str:
    +2061        expressions = self.expressions(expression)
    +2062        exists = " IF EXISTS " if expression.args.get("exists") else " "
    +2063        return f"DROP{exists}{expressions}"
    +2064
    +2065    def addconstraint_sql(self, expression: exp.AddConstraint) -> str:
    +2066        this = self.sql(expression, "this")
    +2067        expression_ = self.sql(expression, "expression")
    +2068        add_constraint = f"ADD CONSTRAINT {this}" if this else "ADD"
     2069
    -2070        return f"{add_constraint} {expression_}"
    -2071
    -2072    def distinct_sql(self, expression: exp.Distinct) -> str:
    -2073        this = self.expressions(expression, flat=True)
    -2074        this = f" {this}" if this else ""
    +2070        enforced = expression.args.get("enforced")
    +2071        if enforced is not None:
    +2072            return f"{add_constraint} CHECK ({expression_}){' ENFORCED' if enforced else ''}"
    +2073
    +2074        return f"{add_constraint} {expression_}"
     2075
    -2076        on = self.sql(expression, "on")
    -2077        on = f" ON {on}" if on else ""
    -2078        return f"DISTINCT{this}{on}"
    +2076    def distinct_sql(self, expression: exp.Distinct) -> str:
    +2077        this = self.expressions(expression, flat=True)
    +2078        this = f" {this}" if this else ""
     2079
    -2080    def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str:
    -2081        return f"{self.sql(expression, 'this')} IGNORE NULLS"
    -2082
    -2083    def respectnulls_sql(self, expression: exp.RespectNulls) -> str:
    -2084        return f"{self.sql(expression, 'this')} RESPECT NULLS"
    -2085
    -2086    def intdiv_sql(self, expression: exp.IntDiv) -> str:
    -2087        return self.sql(
    -2088            exp.Cast(
    -2089                this=exp.Div(this=expression.this, expression=expression.expression),
    -2090                to=exp.DataType(this=exp.DataType.Type.INT),
    -2091            )
    -2092        )
    -2093
    -2094    def dpipe_sql(self, expression: exp.DPipe) -> str:
    -2095        return self.binary(expression, "||")
    -2096
    -2097    def div_sql(self, expression: exp.Div) -> str:
    -2098        return self.binary(expression, "/")
    -2099
    -2100    def overlaps_sql(self, expression: exp.Overlaps) -> str:
    -2101        return self.binary(expression, "OVERLAPS")
    -2102
    -2103    def distance_sql(self, expression: exp.Distance) -> str:
    -2104        return self.binary(expression, "<->")
    +2080        on = self.sql(expression, "on")
    +2081        on = f" ON {on}" if on else ""
    +2082        return f"DISTINCT{this}{on}"
    +2083
    +2084    def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str:
    +2085        return f"{self.sql(expression, 'this')} IGNORE NULLS"
    +2086
    +2087    def respectnulls_sql(self, expression: exp.RespectNulls) -> str:
    +2088        return f"{self.sql(expression, 'this')} RESPECT NULLS"
    +2089
    +2090    def intdiv_sql(self, expression: exp.IntDiv) -> str:
    +2091        return self.sql(
    +2092            exp.Cast(
    +2093                this=exp.Div(this=expression.this, expression=expression.expression),
    +2094                to=exp.DataType(this=exp.DataType.Type.INT),
    +2095            )
    +2096        )
    +2097
    +2098    def dpipe_sql(self, expression: exp.DPipe) -> str:
    +2099        return self.binary(expression, "||")
    +2100
    +2101    def safedpipe_sql(self, expression: exp.SafeDPipe) -> str:
    +2102        if self.STRICT_STRING_CONCAT:
    +2103            return self.func("CONCAT", *(exp.cast(e, "text") for e in expression.flatten()))
    +2104        return self.dpipe_sql(expression)
     2105
    -2106    def dot_sql(self, expression: exp.Dot) -> str:
    -2107        return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}"
    +2106    def div_sql(self, expression: exp.Div) -> str:
    +2107        return self.binary(expression, "/")
     2108
    -2109    def eq_sql(self, expression: exp.EQ) -> str:
    -2110        return self.binary(expression, "=")
    +2109    def overlaps_sql(self, expression: exp.Overlaps) -> str:
    +2110        return self.binary(expression, "OVERLAPS")
     2111
    -2112    def escape_sql(self, expression: exp.Escape) -> str:
    -2113        return self.binary(expression, "ESCAPE")
    +2112    def distance_sql(self, expression: exp.Distance) -> str:
    +2113        return self.binary(expression, "<->")
     2114
    -2115    def glob_sql(self, expression: exp.Glob) -> str:
    -2116        return self.binary(expression, "GLOB")
    +2115    def dot_sql(self, expression: exp.Dot) -> str:
    +2116        return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}"
     2117
    -2118    def gt_sql(self, expression: exp.GT) -> str:
    -2119        return self.binary(expression, ">")
    +2118    def eq_sql(self, expression: exp.EQ) -> str:
    +2119        return self.binary(expression, "=")
     2120
    -2121    def gte_sql(self, expression: exp.GTE) -> str:
    -2122        return self.binary(expression, ">=")
    +2121    def escape_sql(self, expression: exp.Escape) -> str:
    +2122        return self.binary(expression, "ESCAPE")
     2123
    -2124    def ilike_sql(self, expression: exp.ILike) -> str:
    -2125        return self.binary(expression, "ILIKE")
    +2124    def glob_sql(self, expression: exp.Glob) -> str:
    +2125        return self.binary(expression, "GLOB")
     2126
    -2127    def ilikeany_sql(self, expression: exp.ILikeAny) -> str:
    -2128        return self.binary(expression, "ILIKE ANY")
    +2127    def gt_sql(self, expression: exp.GT) -> str:
    +2128        return self.binary(expression, ">")
     2129
    -2130    def is_sql(self, expression: exp.Is) -> str:
    -2131        return self.binary(expression, "IS")
    +2130    def gte_sql(self, expression: exp.GTE) -> str:
    +2131        return self.binary(expression, ">=")
     2132
    -2133    def like_sql(self, expression: exp.Like) -> str:
    -2134        return self.binary(expression, "LIKE")
    +2133    def ilike_sql(self, expression: exp.ILike) -> str:
    +2134        return self.binary(expression, "ILIKE")
     2135
    -2136    def likeany_sql(self, expression: exp.LikeAny) -> str:
    -2137        return self.binary(expression, "LIKE ANY")
    +2136    def ilikeany_sql(self, expression: exp.ILikeAny) -> str:
    +2137        return self.binary(expression, "ILIKE ANY")
     2138
    -2139    def similarto_sql(self, expression: exp.SimilarTo) -> str:
    -2140        return self.binary(expression, "SIMILAR TO")
    -2141
    -2142    def lt_sql(self, expression: exp.LT) -> str:
    -2143        return self.binary(expression, "<")
    -2144
    -2145    def lte_sql(self, expression: exp.LTE) -> str:
    -2146        return self.binary(expression, "<=")
    -2147
    -2148    def mod_sql(self, expression: exp.Mod) -> str:
    -2149        return self.binary(expression, "%")
    -2150
    -2151    def mul_sql(self, expression: exp.Mul) -> str:
    -2152        return self.binary(expression, "*")
    -2153
    -2154    def neq_sql(self, expression: exp.NEQ) -> str:
    -2155        return self.binary(expression, "<>")
    -2156
    -2157    def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str:
    -2158        return self.binary(expression, "IS NOT DISTINCT FROM")
    -2159
    -2160    def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str:
    -2161        return self.binary(expression, "IS DISTINCT FROM")
    -2162
    -2163    def or_sql(self, expression: exp.Or) -> str:
    -2164        return self.connector_sql(expression, "OR")
    -2165
    -2166    def slice_sql(self, expression: exp.Slice) -> str:
    -2167        return self.binary(expression, ":")
    -2168
    -2169    def sub_sql(self, expression: exp.Sub) -> str:
    -2170        return self.binary(expression, "-")
    -2171
    -2172    def trycast_sql(self, expression: exp.TryCast) -> str:
    -2173        return f"TRY_CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
    -2174
    -2175    def use_sql(self, expression: exp.Use) -> str:
    -2176        kind = self.sql(expression, "kind")
    -2177        kind = f" {kind}" if kind else ""
    -2178        this = self.sql(expression, "this")
    -2179        this = f" {this}" if this else ""
    -2180        return f"USE{kind}{this}"
    +2139    def is_sql(self, expression: exp.Is) -> str:
    +2140        if not self.IS_BOOL_ALLOWED and isinstance(expression.expression, exp.Boolean):
    +2141            return self.sql(
    +2142                expression.this if expression.expression.this else exp.not_(expression.this)
    +2143            )
    +2144        return self.binary(expression, "IS")
    +2145
    +2146    def like_sql(self, expression: exp.Like) -> str:
    +2147        return self.binary(expression, "LIKE")
    +2148
    +2149    def likeany_sql(self, expression: exp.LikeAny) -> str:
    +2150        return self.binary(expression, "LIKE ANY")
    +2151
    +2152    def similarto_sql(self, expression: exp.SimilarTo) -> str:
    +2153        return self.binary(expression, "SIMILAR TO")
    +2154
    +2155    def lt_sql(self, expression: exp.LT) -> str:
    +2156        return self.binary(expression, "<")
    +2157
    +2158    def lte_sql(self, expression: exp.LTE) -> str:
    +2159        return self.binary(expression, "<=")
    +2160
    +2161    def mod_sql(self, expression: exp.Mod) -> str:
    +2162        return self.binary(expression, "%")
    +2163
    +2164    def mul_sql(self, expression: exp.Mul) -> str:
    +2165        return self.binary(expression, "*")
    +2166
    +2167    def neq_sql(self, expression: exp.NEQ) -> str:
    +2168        return self.binary(expression, "<>")
    +2169
    +2170    def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str:
    +2171        return self.binary(expression, "IS NOT DISTINCT FROM")
    +2172
    +2173    def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str:
    +2174        return self.binary(expression, "IS DISTINCT FROM")
    +2175
    +2176    def or_sql(self, expression: exp.Or) -> str:
    +2177        return self.connector_sql(expression, "OR")
    +2178
    +2179    def slice_sql(self, expression: exp.Slice) -> str:
    +2180        return self.binary(expression, ":")
     2181
    -2182    def binary(self, expression: exp.Binary, op: str) -> str:
    -2183        op = self.maybe_comment(op, comments=expression.comments)
    -2184        return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}"
    -2185
    -2186    def function_fallback_sql(self, expression: exp.Func) -> str:
    -2187        args = []
    -2188        for arg_value in expression.args.values():
    -2189            if isinstance(arg_value, list):
    -2190                for value in arg_value:
    -2191                    args.append(value)
    -2192            else:
    -2193                args.append(arg_value)
    +2182    def sub_sql(self, expression: exp.Sub) -> str:
    +2183        return self.binary(expression, "-")
    +2184
    +2185    def trycast_sql(self, expression: exp.TryCast) -> str:
    +2186        return f"TRY_CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
    +2187
    +2188    def use_sql(self, expression: exp.Use) -> str:
    +2189        kind = self.sql(expression, "kind")
    +2190        kind = f" {kind}" if kind else ""
    +2191        this = self.sql(expression, "this")
    +2192        this = f" {this}" if this else ""
    +2193        return f"USE{kind}{this}"
     2194
    -2195        return self.func(expression.sql_name(), *args)
    -2196
    -2197    def func(self, name: str, *args: t.Optional[exp.Expression | str]) -> str:
    -2198        return f"{self.normalize_func(name)}({self.format_args(*args)})"
    -2199
    -2200    def format_args(self, *args: t.Optional[str | exp.Expression]) -> str:
    -2201        arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None)
    -2202        if self.pretty and self.text_width(arg_sqls) > self._max_text_width:
    -2203            return self.indent("\n" + f",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True)
    -2204        return ", ".join(arg_sqls)
    -2205
    -2206    def text_width(self, args: t.Iterable) -> int:
    -2207        return sum(len(arg) for arg in args)
    -2208
    -2209    def format_time(self, expression: exp.Expression) -> t.Optional[str]:
    -2210        return format_time(self.sql(expression, "format"), self.time_mapping, self.time_trie)
    -2211
    -2212    def expressions(
    -2213        self,
    -2214        expression: t.Optional[exp.Expression] = None,
    -2215        key: t.Optional[str] = None,
    -2216        sqls: t.Optional[t.List[str]] = None,
    -2217        flat: bool = False,
    -2218        indent: bool = True,
    -2219        sep: str = ", ",
    -2220        prefix: str = "",
    -2221    ) -> str:
    -2222        expressions = expression.args.get(key or "expressions") if expression else sqls
    -2223
    -2224        if not expressions:
    -2225            return ""
    -2226
    -2227        if flat:
    -2228            return sep.join(self.sql(e) for e in expressions)
    -2229
    -2230        num_sqls = len(expressions)
    -2231
    -2232        # These are calculated once in case we have the leading_comma / pretty option set, correspondingly
    -2233        pad = " " * self.pad
    -2234        stripped_sep = sep.strip()
    -2235
    -2236        result_sqls = []
    -2237        for i, e in enumerate(expressions):
    -2238            sql = self.sql(e, comment=False)
    -2239            comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else ""
    -2240
    -2241            if self.pretty:
    -2242                if self._leading_comma:
    -2243                    result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}")
    -2244                else:
    -2245                    result_sqls.append(
    -2246                        f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}"
    -2247                    )
    -2248            else:
    -2249                result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}")
    +2195    def binary(self, expression: exp.Binary, op: str) -> str:
    +2196        op = self.maybe_comment(op, comments=expression.comments)
    +2197        return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}"
    +2198
    +2199    def function_fallback_sql(self, expression: exp.Func) -> str:
    +2200        args = []
    +2201        for arg_value in expression.args.values():
    +2202            if isinstance(arg_value, list):
    +2203                for value in arg_value:
    +2204                    args.append(value)
    +2205            else:
    +2206                args.append(arg_value)
    +2207
    +2208        return self.func(expression.sql_name(), *args)
    +2209
    +2210    def func(
    +2211        self,
    +2212        name: str,
    +2213        *args: t.Optional[exp.Expression | str],
    +2214        prefix: str = "(",
    +2215        suffix: str = ")",
    +2216    ) -> str:
    +2217        return f"{self.normalize_func(name)}{prefix}{self.format_args(*args)}{suffix}"
    +2218
    +2219    def format_args(self, *args: t.Optional[str | exp.Expression]) -> str:
    +2220        arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None)
    +2221        if self.pretty and self.text_width(arg_sqls) > self.max_text_width:
    +2222            return self.indent("\n" + f",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True)
    +2223        return ", ".join(arg_sqls)
    +2224
    +2225    def text_width(self, args: t.Iterable) -> int:
    +2226        return sum(len(arg) for arg in args)
    +2227
    +2228    def format_time(self, expression: exp.Expression) -> t.Optional[str]:
    +2229        return format_time(
    +2230            self.sql(expression, "format"), self.INVERSE_TIME_MAPPING, self.INVERSE_TIME_TRIE
    +2231        )
    +2232
    +2233    def expressions(
    +2234        self,
    +2235        expression: t.Optional[exp.Expression] = None,
    +2236        key: t.Optional[str] = None,
    +2237        sqls: t.Optional[t.List[str]] = None,
    +2238        flat: bool = False,
    +2239        indent: bool = True,
    +2240        sep: str = ", ",
    +2241        prefix: str = "",
    +2242    ) -> str:
    +2243        expressions = expression.args.get(key or "expressions") if expression else sqls
    +2244
    +2245        if not expressions:
    +2246            return ""
    +2247
    +2248        if flat:
    +2249            return sep.join(self.sql(e) for e in expressions)
     2250
    -2251        result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls)
    -2252        return self.indent(result_sql, skip_first=False) if indent else result_sql
    -2253
    -2254    def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str:
    -2255        flat = flat or isinstance(expression.parent, exp.Properties)
    -2256        expressions_sql = self.expressions(expression, flat=flat)
    -2257        if flat:
    -2258            return f"{op} {expressions_sql}"
    -2259        return f"{self.seg(op)}{self.sep() if expressions_sql else ''}{expressions_sql}"
    -2260
    -2261    def naked_property(self, expression: exp.Property) -> str:
    -2262        property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__)
    -2263        if not property_name:
    -2264            self.unsupported(f"Unsupported property {expression.__class__.__name__}")
    -2265        return f"{property_name} {self.sql(expression, 'this')}"
    -2266
    -2267    def set_operation(self, expression: exp.Expression, op: str) -> str:
    -2268        this = self.sql(expression, "this")
    -2269        op = self.seg(op)
    -2270        return self.query_modifiers(
    -2271            expression, f"{this}{op}{self.sep()}{self.sql(expression, 'expression')}"
    -2272        )
    -2273
    -2274    def tag_sql(self, expression: exp.Tag) -> str:
    -2275        return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}"
    -2276
    -2277    def token_sql(self, token_type: TokenType) -> str:
    -2278        return self.TOKEN_MAPPING.get(token_type, token_type.name)
    -2279
    -2280    def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str:
    -2281        this = self.sql(expression, "this")
    -2282        expressions = self.no_identify(self.expressions, expression)
    -2283        expressions = (
    -2284            self.wrap(expressions) if expression.args.get("wrapped") else f" {expressions}"
    -2285        )
    -2286        return f"{this}{expressions}"
    +2251        num_sqls = len(expressions)
    +2252
    +2253        # These are calculated once in case we have the leading_comma / pretty option set, correspondingly
    +2254        pad = " " * self.pad
    +2255        stripped_sep = sep.strip()
    +2256
    +2257        result_sqls = []
    +2258        for i, e in enumerate(expressions):
    +2259            sql = self.sql(e, comment=False)
    +2260            comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else ""
    +2261
    +2262            if self.pretty:
    +2263                if self.leading_comma:
    +2264                    result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}")
    +2265                else:
    +2266                    result_sqls.append(
    +2267                        f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}"
    +2268                    )
    +2269            else:
    +2270                result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}")
    +2271
    +2272        result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls)
    +2273        return self.indent(result_sql, skip_first=False) if indent else result_sql
    +2274
    +2275    def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str:
    +2276        flat = flat or isinstance(expression.parent, exp.Properties)
    +2277        expressions_sql = self.expressions(expression, flat=flat)
    +2278        if flat:
    +2279            return f"{op} {expressions_sql}"
    +2280        return f"{self.seg(op)}{self.sep() if expressions_sql else ''}{expressions_sql}"
    +2281
    +2282    def naked_property(self, expression: exp.Property) -> str:
    +2283        property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__)
    +2284        if not property_name:
    +2285            self.unsupported(f"Unsupported property {expression.__class__.__name__}")
    +2286        return f"{property_name} {self.sql(expression, 'this')}"
     2287
    -2288    def joinhint_sql(self, expression: exp.JoinHint) -> str:
    +2288    def set_operation(self, expression: exp.Expression, op: str) -> str:
     2289        this = self.sql(expression, "this")
    -2290        expressions = self.expressions(expression, flat=True)
    -2291        return f"{this}({expressions})"
    -2292
    -2293    def kwarg_sql(self, expression: exp.Kwarg) -> str:
    -2294        return self.binary(expression, "=>")
    -2295
    -2296    def when_sql(self, expression: exp.When) -> str:
    -2297        matched = "MATCHED" if expression.args["matched"] else "NOT MATCHED"
    -2298        source = " BY SOURCE" if self.MATCHED_BY_SOURCE and expression.args.get("source") else ""
    -2299        condition = self.sql(expression, "condition")
    -2300        condition = f" AND {condition}" if condition else ""
    -2301
    -2302        then_expression = expression.args.get("then")
    -2303        if isinstance(then_expression, exp.Insert):
    -2304            then = f"INSERT {self.sql(then_expression, 'this')}"
    -2305            if "expression" in then_expression.args:
    -2306                then += f" VALUES {self.sql(then_expression, 'expression')}"
    -2307        elif isinstance(then_expression, exp.Update):
    -2308            if isinstance(then_expression.args.get("expressions"), exp.Star):
    -2309                then = f"UPDATE {self.sql(then_expression, 'expressions')}"
    -2310            else:
    -2311                then = f"UPDATE SET {self.expressions(then_expression, flat=True)}"
    -2312        else:
    -2313            then = self.sql(then_expression)
    -2314        return f"WHEN {matched}{source}{condition} THEN {then}"
    -2315
    -2316    def merge_sql(self, expression: exp.Merge) -> str:
    -2317        this = self.sql(expression, "this")
    -2318        using = f"USING {self.sql(expression, 'using')}"
    -2319        on = f"ON {self.sql(expression, 'on')}"
    -2320        return f"MERGE INTO {this} {using} {on} {self.expressions(expression, sep=' ')}"
    -2321
    -2322    def tochar_sql(self, expression: exp.ToChar) -> str:
    -2323        if expression.args.get("format"):
    -2324            self.unsupported("Format argument unsupported for TO_CHAR/TO_VARCHAR function")
    -2325
    -2326        return self.sql(exp.cast(expression.this, "text"))
    -2327
    -2328    def dictproperty_sql(self, expression: exp.DictProperty) -> str:
    -2329        this = self.sql(expression, "this")
    -2330        kind = self.sql(expression, "kind")
    -2331        settings_sql = self.expressions(expression, key="settings", sep=" ")
    -2332        args = f"({self.sep('')}{settings_sql}{self.seg(')', sep='')}" if settings_sql else "()"
    -2333        return f"{this}({kind}{args})"
    -2334
    -2335    def dictrange_sql(self, expression: exp.DictRange) -> str:
    -2336        this = self.sql(expression, "this")
    -2337        max = self.sql(expression, "max")
    -2338        min = self.sql(expression, "min")
    -2339        return f"{this}(MIN {min} MAX {max})"
    -2340
    -2341    def dictsubproperty_sql(self, expression: exp.DictSubProperty) -> str:
    -2342        return f"{self.sql(expression, 'this')} {self.sql(expression, 'value')}"
    -
    - - -

    Generator interprets the given syntax tree and produces a SQL string as an output.

+2290        op = self.seg(op)
+2291        return self.query_modifiers(
+2292            expression, f"{this}{op}{self.sep()}{self.sql(expression, 'expression')}"
+2293        )
+2294
+2295    def tag_sql(self, expression: exp.Tag) -> str:
+2296        return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}"
+2297
+2298    def token_sql(self, token_type: TokenType) -> str:
+2299        return self.TOKEN_MAPPING.get(token_type, token_type.name)
+2300
+2301    def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str:
+2302        this = self.sql(expression, "this")
+2303        expressions = self.no_identify(self.expressions, expression)
+2304        expressions = (
+2305            self.wrap(expressions) if expression.args.get("wrapped") else f" {expressions}"
+2306        )
+2307        return f"{this}{expressions}"
+2308
+2309    def joinhint_sql(self, expression: exp.JoinHint) -> str:
+2310        this = self.sql(expression, "this")
+2311        expressions = self.expressions(expression, flat=True)
+2312        return f"{this}({expressions})"
+2313
+2314    def kwarg_sql(self, expression: exp.Kwarg) -> str:
+2315        return self.binary(expression, "=>")
+2316
+2317    def when_sql(self, expression: exp.When) -> str:
+2318        matched = "MATCHED" if expression.args["matched"] else "NOT MATCHED"
+2319        source = " BY SOURCE" if self.MATCHED_BY_SOURCE and expression.args.get("source") else ""
+2320        condition = self.sql(expression, "condition")
+2321        condition = f" AND {condition}" if condition else ""
+2322
+2323        then_expression = expression.args.get("then")
+2324        if isinstance(then_expression, exp.Insert):
+2325            then = f"INSERT {self.sql(then_expression, 'this')}"
+2326            if "expression" in then_expression.args:
+2327                then += f" VALUES {self.sql(then_expression, 'expression')}"
+2328        elif isinstance(then_expression, exp.Update):
+2329            if isinstance(then_expression.args.get("expressions"), exp.Star):
+2330                then = f"UPDATE {self.sql(then_expression, 'expressions')}"
+2331            else:
+2332                then = f"UPDATE SET {self.expressions(then_expression, flat=True)}"
+2333        else:
+2334            then = self.sql(then_expression)
+2335        return f"WHEN {matched}{source}{condition} THEN {then}"
+2336
+2337    def merge_sql(self, expression: exp.Merge) -> str:
+2338        this = self.sql(expression, "this")
+2339        using = f"USING {self.sql(expression, 'using')}"
+2340        on = f"ON {self.sql(expression, 'on')}"
+2341        return f"MERGE INTO {this} {using} {on} {self.expressions(expression, sep=' ')}"
+2342
+2343    def tochar_sql(self, expression: exp.ToChar) -> str:
+2344        if expression.args.get("format"):
+2345            self.unsupported("Format argument unsupported for TO_CHAR/TO_VARCHAR function")
+2346
+2347        return self.sql(exp.cast(expression.this, "text"))
+2348
+2349    def dictproperty_sql(self, expression: exp.DictProperty) -> str:
+2350        this = self.sql(expression, "this")
+2351        kind = self.sql(expression, "kind")
+2352        settings_sql = self.expressions(expression, key="settings", sep=" ")
+2353        args = f"({self.sep('')}{settings_sql}{self.seg(')', sep='')}" if settings_sql else "()"
+2354        return f"{this}({kind}{args})"
+2355
+2356    def dictrange_sql(self, expression: exp.DictRange) -> str:
+2357        this = self.sql(expression, "this")
+2358        max = self.sql(expression, "max")
+2359        min = self.sql(expression, "min")
+2360        return f"{this}(MIN {min} MAX {max})"
+2361
+2362    def dictsubproperty_sql(self, expression: exp.DictSubProperty) -> str:
+2363        return f"{self.sql(expression, 'this')} {self.sql(expression, 'value')}"
+2364
+2365    def oncluster_sql(self, expression: exp.OnCluster) -> str:
+2366        return ""
+
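As a rough illustration of the merge_sql() and when_sql() handlers shown above, round-tripping a MERGE statement through sqlglot exercises both; the query text below is only an example, not part of the patch:

```python
import sqlglot

# Illustrative only: generating this statement goes through merge_sql()
# and when_sql() in the Generator diff above.
sql = """
MERGE INTO target USING source ON target.id = source.id
WHEN MATCHED THEN UPDATE SET target.v = source.v
WHEN NOT MATCHED THEN INSERT (id, v) VALUES (source.id, source.v)
"""

print(sqlglot.transpile(sql)[0])
```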
    + + +

    Generator converts a given syntax tree to the corresponding SQL string.

    Arguments:
      -
    • time_mapping (dict): the dictionary of custom time mappings in which the key -represents a python time format and the output the target time format
    • -
    • time_trie (trie): a trie of the time_mapping keys
    • -
    • pretty (bool): if set to True the returned string will be formatted. Default: False.
    • -
    • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
    • -
    • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
    • -
    • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
    • -
    • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
    • -
    • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
    • -
    • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
    • -
    • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
    • -
    • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
    • -
    • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
    • -
    • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
    • -
    • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
    • -
    • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
    • -
    • identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
    • -
    • normalize (bool): if set to True all identifiers will lower cased
    • -
    • string_escape (str): specifies a string escape character. Default: '.
    • -
    • identifier_escape (str): specifies an identifier escape character. Default: ".
    • -
    • pad (int): determines padding in a formatted string. Default: 2.
    • -
    • indent (int): determines the size of indentation in a formatted string. Default: 4.
    • -
    • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
    • -
    • normalize_functions (str): normalize function names, "upper", "lower", or None -Default: "upper"
    • -
    • alias_post_tablesample (bool): if the table alias comes after tablesample -Default: False
    • -
    • identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit -Default: False
    • -
    • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters -unsupported expressions. Default ErrorLevel.WARN.
    • -
    • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. -Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". -Default: "nulls_are_small"
    • -
    • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +
    • pretty: Whether or not to format the produced SQL string. +Default: False.
    • +
    • identify: Determines when an identifier should be quoted. Possible values are: +False (default): Never quote, except in cases where it's mandatory by the dialect. +True or 'always': Always quote. +'safe': Only quote identifiers that are case insensitive.
    • +
    • normalize: Whether or not to normalize identifiers to lowercase. +Default: False.
    • +
    • pad: Determines the pad size in a formatted string. +Default: 2.
    • +
    • indent: Determines the indentation size in a formatted string. +Default: 2.
    • +
    • normalize_functions: Whether or not to normalize all function names. Possible values are: +"upper" or True (default): Convert names to uppercase. +"lower": Convert names to lowercase. +False: Disables function name normalization.
    • +
    • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. +Default ErrorLevel.WARN.
    • +
    • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
    • -
    • leading_comma (bool): if the the comma is leading or trailing in select statements +
    • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. +This is only relevant when generating in pretty mode. Default: False
• max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true
@@ -5517,85 +5565,48 @@ Default: True
- Generator( time_mapping=None, time_trie=None, pretty=None, quote_start=None, quote_end=None, identifier_start=None, identifier_end=None, bit_start=None, bit_end=None, hex_start=None, hex_end=None, byte_start=None, byte_end=None, raw_start=None, raw_end=None, identify=False, normalize=False, string_escape=None, identifier_escape=None, pad=2, indent=2, index_offset=0, unnest_column_only=False, alias_post_tablesample=False, identifiers_can_start_with_digit=False, normalize_functions='upper', unsupported_level=<ErrorLevel.WARN: 'WARN'>, null_ordering=None, max_unsupported=3, leading_comma=False, max_text_width=80, comments=True)
+ Generator( pretty: Optional[bool] = None, identify: str | bool = False, normalize: bool = False, pad: int = 2, indent: int = 2, normalize_functions: Union[str, bool, NoneType] = None, unsupported_level: sqlglot.errors.ErrorLevel = <ErrorLevel.WARN: 'WARN'>, max_unsupported: int = 3, leading_comma: bool = False, max_text_width: int = 80, comments: bool = True)
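A small usage sketch of the slimmed-down constructor options documented above, assuming (as the rest of this diff suggests) that they are forwarded from Expression.sql() to the Generator:

```python
import sqlglot

# Sketch: options such as pretty, identify and leading_comma are assumed to be
# passed through to the Generator when calling Expression.sql().
expression = sqlglot.parse_one("SELECT a, b FROM some_table WHERE a > 1")

print(expression.sql(pretty=True, identify=True))
print(expression.sql(pretty=True, leading_comma=True))
```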
      -
      287    def __init__(
      -288        self,
      -289        time_mapping=None,
      -290        time_trie=None,
      -291        pretty=None,
      -292        quote_start=None,
      -293        quote_end=None,
      -294        identifier_start=None,
      -295        identifier_end=None,
      -296        bit_start=None,
      -297        bit_end=None,
      -298        hex_start=None,
      -299        hex_end=None,
      -300        byte_start=None,
      -301        byte_end=None,
      -302        raw_start=None,
      -303        raw_end=None,
      -304        identify=False,
      -305        normalize=False,
      -306        string_escape=None,
      -307        identifier_escape=None,
      -308        pad=2,
      -309        indent=2,
      -310        index_offset=0,
      -311        unnest_column_only=False,
      -312        alias_post_tablesample=False,
      -313        identifiers_can_start_with_digit=False,
      -314        normalize_functions="upper",
      -315        unsupported_level=ErrorLevel.WARN,
      -316        null_ordering=None,
      -317        max_unsupported=3,
      -318        leading_comma=False,
      -319        max_text_width=80,
      -320        comments=True,
      -321    ):
      -322        import sqlglot
      -323
      -324        self.time_mapping = time_mapping or {}
      -325        self.time_trie = time_trie
      -326        self.pretty = pretty if pretty is not None else sqlglot.pretty
      -327        self.quote_start = quote_start or "'"
      -328        self.quote_end = quote_end or "'"
      -329        self.identifier_start = identifier_start or '"'
      -330        self.identifier_end = identifier_end or '"'
      -331        self.bit_start = bit_start
      -332        self.bit_end = bit_end
      -333        self.hex_start = hex_start
      -334        self.hex_end = hex_end
      -335        self.byte_start = byte_start
      -336        self.byte_end = byte_end
      -337        self.raw_start = raw_start
      -338        self.raw_end = raw_end
      -339        self.identify = identify
      -340        self.normalize = normalize
      -341        self.string_escape = string_escape or "'"
      -342        self.identifier_escape = identifier_escape or '"'
      -343        self.pad = pad
      -344        self.index_offset = index_offset
      -345        self.unnest_column_only = unnest_column_only
      -346        self.alias_post_tablesample = alias_post_tablesample
      -347        self.identifiers_can_start_with_digit = identifiers_can_start_with_digit
      -348        self.normalize_functions = normalize_functions
      -349        self.unsupported_level = unsupported_level
      -350        self.unsupported_messages = []
      -351        self.max_unsupported = max_unsupported
      -352        self.null_ordering = null_ordering
      -353        self._indent = indent
      -354        self._escaped_quote_end = self.string_escape + self.quote_end
      -355        self._escaped_identifier_end = self.identifier_escape + self.identifier_end
      -356        self._leading_comma = leading_comma
      -357        self._max_text_width = max_text_width
      -358        self._comments = comments
      -359        self._cache = None
      +            
      303    def __init__(
      +304        self,
      +305        pretty: t.Optional[bool] = None,
      +306        identify: str | bool = False,
      +307        normalize: bool = False,
      +308        pad: int = 2,
      +309        indent: int = 2,
      +310        normalize_functions: t.Optional[str | bool] = None,
      +311        unsupported_level: ErrorLevel = ErrorLevel.WARN,
      +312        max_unsupported: int = 3,
      +313        leading_comma: bool = False,
      +314        max_text_width: int = 80,
      +315        comments: bool = True,
      +316    ):
      +317        import sqlglot
      +318
      +319        self.pretty = pretty if pretty is not None else sqlglot.pretty
      +320        self.identify = identify
      +321        self.normalize = normalize
      +322        self.pad = pad
      +323        self._indent = indent
      +324        self.unsupported_level = unsupported_level
      +325        self.max_unsupported = max_unsupported
      +326        self.leading_comma = leading_comma
      +327        self.max_text_width = max_text_width
      +328        self.comments = comments
      +329
      +330        # This is both a Dialect property and a Generator argument, so we prioritize the latter
      +331        self.normalize_functions = (
      +332            self.NORMALIZE_FUNCTIONS if normalize_functions is None else normalize_functions
      +333        )
      +334
      +335        self.unsupported_messages: t.List[str] = []
      +336        self._escaped_quote_end: str = self.STRING_ESCAPE + self.QUOTE_END
      +337        self._escaped_identifier_end: str = self.IDENTIFIER_ESCAPE + self.IDENTIFIER_END
      +338        self._cache: t.Optional[t.Dict[int, str]] = None
       
      @@ -5613,50 +5624,59 @@ Default: True
      -
      361    def generate(
      -362        self,
      -363        expression: t.Optional[exp.Expression],
      -364        cache: t.Optional[t.Dict[int, str]] = None,
      -365    ) -> str:
      -366        """
      -367        Generates a SQL string by interpreting the given syntax tree.
      -368
      -369        Args
      -370            expression: the syntax tree.
      -371            cache: an optional sql string cache. this leverages the hash of an expression which is slow, so only use this if you set _hash on each node.
      -372
      -373        Returns
      -374            the SQL string.
      -375        """
      -376        if cache is not None:
      -377            self._cache = cache
      -378        self.unsupported_messages = []
      -379        sql = self.sql(expression).strip()
      -380        self._cache = None
      -381
      -382        if self.unsupported_level == ErrorLevel.IGNORE:
      -383            return sql
      -384
      -385        if self.unsupported_level == ErrorLevel.WARN:
      -386            for msg in self.unsupported_messages:
      -387                logger.warning(msg)
      -388        elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages:
      -389            raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported))
      -390
      -391        if self.pretty:
      -392            sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n")
      -393        return sql
      -
      - - -

      Generates a SQL string by interpreting the given syntax tree.

      - -

Args
- expression: the syntax tree.
- cache: an optional sql string cache. this leverages the hash of an expression which is slow, so only use this if you set _hash on each node.

      - -

Returns
- the SQL string.

      +
      340    def generate(
      +341        self,
      +342        expression: t.Optional[exp.Expression],
      +343        cache: t.Optional[t.Dict[int, str]] = None,
      +344    ) -> str:
      +345        """
      +346        Generates the SQL string corresponding to the given syntax tree.
      +347
      +348        Args:
      +349            expression: The syntax tree.
      +350            cache: An optional sql string cache. This leverages the hash of an Expression
      +351                which can be slow to compute, so only use it if you set _hash on each node.
      +352
      +353        Returns:
      +354            The SQL string corresponding to `expression`.
      +355        """
      +356        if cache is not None:
      +357            self._cache = cache
      +358
      +359        self.unsupported_messages = []
      +360        sql = self.sql(expression).strip()
      +361        self._cache = None
      +362
      +363        if self.unsupported_level == ErrorLevel.IGNORE:
      +364            return sql
      +365
      +366        if self.unsupported_level == ErrorLevel.WARN:
      +367            for msg in self.unsupported_messages:
      +368                logger.warning(msg)
      +369        elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages:
      +370            raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported))
      +371
      +372        if self.pretty:
      +373            sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n")
      +374        return sql
      +
      + + +

      Generates the SQL string corresponding to the given syntax tree.

      + +
      Arguments:
      + +
        +
      • expression: The syntax tree.
      • +
      • cache: An optional sql string cache. This leverages the hash of an Expression +which can be slow to compute, so only use it if you set _hash on each node.
      • +
      + +
      Returns:
      + +
      +

      The SQL string corresponding to expression.

      +
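For reference, a minimal sketch of calling generate() on the base Generator directly (dialects normally instantiate their own Generator subclass):

```python
from sqlglot import parse_one
from sqlglot.generator import Generator

# Minimal sketch: generate() resets unsupported_messages, renders the tree,
# and then applies the unsupported_level policy described above.
generator = Generator(pretty=True)
print(generator.generate(parse_one("SELECT x FROM y WHERE z = 1")))
```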
      @@ -5672,10 +5692,10 @@ Default: True
      -
      395    def unsupported(self, message: str) -> None:
      -396        if self.unsupported_level == ErrorLevel.IMMEDIATE:
      -397            raise UnsupportedError(message)
      -398        self.unsupported_messages.append(message)
      +            
      376    def unsupported(self, message: str) -> None:
      +377        if self.unsupported_level == ErrorLevel.IMMEDIATE:
      +378            raise UnsupportedError(message)
      +379        self.unsupported_messages.append(message)
       
      @@ -5693,8 +5713,8 @@ Default: True
      -
      400    def sep(self, sep: str = " ") -> str:
      -401        return f"{sep.strip()}\n" if self.pretty else sep
      +            
      381    def sep(self, sep: str = " ") -> str:
      +382        return f"{sep.strip()}\n" if self.pretty else sep
       
      @@ -5712,8 +5732,8 @@ Default: True
      -
      403    def seg(self, sql: str, sep: str = " ") -> str:
      -404        return f"{self.sep(sep)}{sql}"
      +            
      384    def seg(self, sql: str, sep: str = " ") -> str:
      +385        return f"{self.sep(sep)}{sql}"
       
      @@ -5731,10 +5751,10 @@ Default: True
      -
      406    def pad_comment(self, comment: str) -> str:
      -407        comment = " " + comment if comment[0].strip() else comment
      -408        comment = comment + " " if comment[-1].strip() else comment
      -409        return comment
      +            
      387    def pad_comment(self, comment: str) -> str:
      +388        comment = " " + comment if comment[0].strip() else comment
      +389        comment = comment + " " if comment[-1].strip() else comment
      +390        return comment
       
      @@ -5752,33 +5772,37 @@ Default: True
      -
      411    def maybe_comment(
      -412        self,
      -413        sql: str,
      -414        expression: t.Optional[exp.Expression] = None,
      -415        comments: t.Optional[t.List[str]] = None,
      -416    ) -> str:
      -417        comments = ((expression and expression.comments) if comments is None else comments) if self._comments else None  # type: ignore
      -418
      -419        if not comments or isinstance(expression, exp.Binary):
      -420            return sql
      +            
      392    def maybe_comment(
      +393        self,
      +394        sql: str,
      +395        expression: t.Optional[exp.Expression] = None,
      +396        comments: t.Optional[t.List[str]] = None,
      +397    ) -> str:
      +398        comments = (
      +399            ((expression and expression.comments) if comments is None else comments)  # type: ignore
      +400            if self.comments
      +401            else None
      +402        )
      +403
      +404        if not comments or isinstance(expression, exp.Binary):
      +405            return sql
      +406
      +407        sep = "\n" if self.pretty else " "
      +408        comments_sql = sep.join(
      +409            f"/*{self.pad_comment(comment)}*/" for comment in comments if comment
      +410        )
      +411
      +412        if not comments_sql:
      +413            return sql
      +414
      +415        if isinstance(expression, self.WITH_SEPARATED_COMMENTS):
      +416            return (
      +417                f"{self.sep()}{comments_sql}{sql}"
      +418                if sql[0].isspace()
      +419                else f"{comments_sql}{self.sep()}{sql}"
      +420            )
       421
      -422        sep = "\n" if self.pretty else " "
      -423        comments_sql = sep.join(
      -424            f"/*{self.pad_comment(comment)}*/" for comment in comments if comment
      -425        )
      -426
      -427        if not comments_sql:
      -428            return sql
      -429
      -430        if isinstance(expression, self.WITH_SEPARATED_COMMENTS):
      -431            return (
      -432                f"{self.sep()}{comments_sql}{sql}"
      -433                if sql[0].isspace()
      -434                else f"{comments_sql}{self.sep()}{sql}"
      -435            )
      -436
      -437        return f"{sql} {comments_sql}"
      +422        return f"{sql} {comments_sql}"
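A hedged example of the comment handling above: comments attached to parsed expressions are re-emitted through maybe_comment(), and the new comments flag (replacing the private _comments) turns that off.

```python
from sqlglot import parse_one

# Sketch: comments parsed from the input are kept by default and dropped
# when the Generator is constructed with comments=False.
expr = parse_one("SELECT a /* keep me */ FROM t")
print(expr.sql())                # comment is preserved in the output
print(expr.sql(comments=False))  # comment is dropped
```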
       
      @@ -5796,15 +5820,15 @@ Default: True
      -
      439    def wrap(self, expression: exp.Expression | str) -> str:
      -440        this_sql = self.indent(
      -441            self.sql(expression)
      -442            if isinstance(expression, (exp.Select, exp.Union))
      -443            else self.sql(expression, "this"),
      -444            level=1,
      -445            pad=0,
      -446        )
      -447        return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}"
      +            
      424    def wrap(self, expression: exp.Expression | str) -> str:
      +425        this_sql = self.indent(
      +426            self.sql(expression)
      +427            if isinstance(expression, (exp.Select, exp.Union))
      +428            else self.sql(expression, "this"),
      +429            level=1,
      +430            pad=0,
      +431        )
      +432        return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}"
       
      @@ -5822,12 +5846,12 @@ Default: True
      -
      449    def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str:
      -450        original = self.identify
      -451        self.identify = False
      -452        result = func(*args, **kwargs)
      -453        self.identify = original
      -454        return result
      +            
      434    def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str:
      +435        original = self.identify
      +436        self.identify = False
      +437        result = func(*args, **kwargs)
      +438        self.identify = original
      +439        return result
       
      @@ -5845,12 +5869,12 @@ Default: True
      -
      456    def normalize_func(self, name: str) -> str:
      -457        if self.normalize_functions == "upper":
      -458            return name.upper()
      -459        if self.normalize_functions == "lower":
      -460            return name.lower()
      -461        return name
      +            
      441    def normalize_func(self, name: str) -> str:
      +442        if self.normalize_functions == "upper" or self.normalize_functions is True:
      +443            return name.upper()
      +444        if self.normalize_functions == "lower":
      +445            return name.lower()
      +446        return name
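A short sketch of the normalize_func() change above, where True now behaves like "upper":

```python
from sqlglot import parse_one

# Sketch: function-name casing as dispatched by normalize_func().
expr = parse_one("SELECT COALESCE(a, b) FROM t")
print(expr.sql(normalize_functions="lower"))  # e.g. coalesce(a, b)
print(expr.sql(normalize_functions=True))     # e.g. COALESCE(a, b)
```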
       
      @@ -5868,26 +5892,26 @@ Default: True
      -
      463    def indent(
      -464        self,
      -465        sql: str,
      -466        level: int = 0,
      -467        pad: t.Optional[int] = None,
      -468        skip_first: bool = False,
      -469        skip_last: bool = False,
      -470    ) -> str:
      -471        if not self.pretty:
      -472            return sql
      -473
      -474        pad = self.pad if pad is None else pad
      -475        lines = sql.split("\n")
      -476
      -477        return "\n".join(
      -478            line
      -479            if (skip_first and i == 0) or (skip_last and i == len(lines) - 1)
      -480            else f"{' ' * (level * self._indent + pad)}{line}"
      -481            for i, line in enumerate(lines)
      -482        )
      +            
      448    def indent(
      +449        self,
      +450        sql: str,
      +451        level: int = 0,
      +452        pad: t.Optional[int] = None,
      +453        skip_first: bool = False,
      +454        skip_last: bool = False,
      +455    ) -> str:
      +456        if not self.pretty:
      +457            return sql
      +458
      +459        pad = self.pad if pad is None else pad
      +460        lines = sql.split("\n")
      +461
      +462        return "\n".join(
      +463            line
      +464            if (skip_first and i == 0) or (skip_last and i == len(lines) - 1)
      +465            else f"{' ' * (level * self._indent + pad)}{line}"
      +466            for i, line in enumerate(lines)
      +467        )
       
      @@ -5905,52 +5929,52 @@ Default: True
      -
      484    def sql(
      -485        self,
      -486        expression: t.Optional[str | exp.Expression],
      -487        key: t.Optional[str] = None,
      -488        comment: bool = True,
      -489    ) -> str:
      -490        if not expression:
      -491            return ""
      -492
      -493        if isinstance(expression, str):
      -494            return expression
      -495
      -496        if key:
      -497            return self.sql(expression.args.get(key))
      +            
      469    def sql(
      +470        self,
      +471        expression: t.Optional[str | exp.Expression],
      +472        key: t.Optional[str] = None,
      +473        comment: bool = True,
      +474    ) -> str:
      +475        if not expression:
      +476            return ""
      +477
      +478        if isinstance(expression, str):
      +479            return expression
      +480
      +481        if key:
      +482            return self.sql(expression.args.get(key))
      +483
      +484        if self._cache is not None:
      +485            expression_id = hash(expression)
      +486
      +487            if expression_id in self._cache:
      +488                return self._cache[expression_id]
      +489
      +490        transform = self.TRANSFORMS.get(expression.__class__)
      +491
      +492        if callable(transform):
      +493            sql = transform(self, expression)
      +494        elif transform:
      +495            sql = transform
      +496        elif isinstance(expression, exp.Expression):
      +497            exp_handler_name = f"{expression.key}_sql"
       498
      -499        if self._cache is not None:
      -500            expression_id = hash(expression)
      -501
      -502            if expression_id in self._cache:
      -503                return self._cache[expression_id]
      -504
      -505        transform = self.TRANSFORMS.get(expression.__class__)
      -506
      -507        if callable(transform):
      -508            sql = transform(self, expression)
      -509        elif transform:
      -510            sql = transform
      -511        elif isinstance(expression, exp.Expression):
      -512            exp_handler_name = f"{expression.key}_sql"
      -513
      -514            if hasattr(self, exp_handler_name):
      -515                sql = getattr(self, exp_handler_name)(expression)
      -516            elif isinstance(expression, exp.Func):
      -517                sql = self.function_fallback_sql(expression)
      -518            elif isinstance(expression, exp.Property):
      -519                sql = self.property_sql(expression)
      -520            else:
      -521                raise ValueError(f"Unsupported expression type {expression.__class__.__name__}")
      -522        else:
      -523            raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}")
      -524
      -525        sql = self.maybe_comment(sql, expression) if self._comments and comment else sql
      -526
      -527        if self._cache is not None:
      -528            self._cache[expression_id] = sql
      -529        return sql
      +499            if hasattr(self, exp_handler_name):
      +500                sql = getattr(self, exp_handler_name)(expression)
      +501            elif isinstance(expression, exp.Func):
      +502                sql = self.function_fallback_sql(expression)
      +503            elif isinstance(expression, exp.Property):
      +504                sql = self.property_sql(expression)
      +505            else:
      +506                raise ValueError(f"Unsupported expression type {expression.__class__.__name__}")
      +507        else:
      +508            raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}")
      +509
      +510        sql = self.maybe_comment(sql, expression) if self.comments and comment else sql
      +511
      +512        if self._cache is not None:
      +513            self._cache[expression_id] = sql
      +514        return sql
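To illustrate the dispatch order in sql() above (a TRANSFORMS entry, then the <key>_sql handler, then the Func/Property fallbacks), here is a hypothetical Generator subclass; CustomGenerator is not part of sqlglot and is only a sketch:

```python
from sqlglot import exp, parse_one
from sqlglot.generator import Generator

class CustomGenerator(Generator):
    # A TRANSFORMS entry wins over the Generator's default handling of exp.Array.
    TRANSFORMS = {
        **Generator.TRANSFORMS,
        exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]",
    }

print(CustomGenerator().generate(parse_one("SELECT ARRAY(1, 2, 3)")))
```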
       
      @@ -5968,10 +5992,10 @@ Default: True
      -
      531    def uncache_sql(self, expression: exp.Uncache) -> str:
      -532        table = self.sql(expression, "this")
      -533        exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
      -534        return f"UNCACHE TABLE{exists_sql} {table}"
      +            
      516    def uncache_sql(self, expression: exp.Uncache) -> str:
      +517        table = self.sql(expression, "this")
      +518        exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
      +519        return f"UNCACHE TABLE{exists_sql} {table}"
       
      @@ -5989,15 +6013,15 @@ Default: True
      -
      536    def cache_sql(self, expression: exp.Cache) -> str:
      -537        lazy = " LAZY" if expression.args.get("lazy") else ""
      -538        table = self.sql(expression, "this")
      -539        options = expression.args.get("options")
      -540        options = f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})" if options else ""
      -541        sql = self.sql(expression, "expression")
      -542        sql = f" AS{self.sep()}{sql}" if sql else ""
      -543        sql = f"CACHE{lazy} TABLE {table}{options}{sql}"
      -544        return self.prepend_ctes(expression, sql)
      +            
      521    def cache_sql(self, expression: exp.Cache) -> str:
      +522        lazy = " LAZY" if expression.args.get("lazy") else ""
      +523        table = self.sql(expression, "this")
      +524        options = expression.args.get("options")
      +525        options = f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})" if options else ""
      +526        sql = self.sql(expression, "expression")
      +527        sql = f" AS{self.sep()}{sql}" if sql else ""
      +528        sql = f"CACHE{lazy} TABLE {table}{options}{sql}"
      +529        return self.prepend_ctes(expression, sql)
       
      @@ -6015,11 +6039,11 @@ Default: True
      -
      546    def characterset_sql(self, expression: exp.CharacterSet) -> str:
      -547        if isinstance(expression.parent, exp.Cast):
      -548            return f"CHAR CHARACTER SET {self.sql(expression, 'this')}"
      -549        default = "DEFAULT " if expression.args.get("default") else ""
      -550        return f"{default}CHARACTER SET={self.sql(expression, 'this')}"
      +            
      531    def characterset_sql(self, expression: exp.CharacterSet) -> str:
      +532        if isinstance(expression.parent, exp.Cast):
      +533            return f"CHAR CHARACTER SET {self.sql(expression, 'this')}"
      +534        default = "DEFAULT " if expression.args.get("default") else ""
      +535        return f"{default}CHARACTER SET={self.sql(expression, 'this')}"
       
      @@ -6037,17 +6061,17 @@ Default: True
      -
      552    def column_sql(self, expression: exp.Column) -> str:
      -553        return ".".join(
      -554            self.sql(part)
      -555            for part in (
      -556                expression.args.get("catalog"),
      -557                expression.args.get("db"),
      -558                expression.args.get("table"),
      -559                expression.args.get("this"),
      -560            )
      -561            if part
      -562        )
      +            
      537    def column_sql(self, expression: exp.Column) -> str:
      +538        return ".".join(
      +539            self.sql(part)
      +540            for part in (
      +541                expression.args.get("catalog"),
      +542                expression.args.get("db"),
      +543                expression.args.get("table"),
      +544                expression.args.get("this"),
      +545            )
      +546            if part
      +547        )
       
      @@ -6065,11 +6089,11 @@ Default: True
      -
      564    def columnposition_sql(self, expression: exp.ColumnPosition) -> str:
      -565        this = self.sql(expression, "this")
      -566        this = f" {this}" if this else ""
      -567        position = self.sql(expression, "position")
      -568        return f"{position}{this}"
      +            
      549    def columnposition_sql(self, expression: exp.ColumnPosition) -> str:
      +550        this = self.sql(expression, "this")
      +551        this = f" {this}" if this else ""
      +552        position = self.sql(expression, "position")
      +553        return f"{position}{this}"
       
      @@ -6087,17 +6111,17 @@ Default: True
      -
      570    def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
      -571        column = self.sql(expression, "this")
      -572        kind = self.sql(expression, "kind")
      -573        constraints = self.expressions(expression, key="constraints", sep=" ", flat=True)
      -574        exists = "IF NOT EXISTS " if expression.args.get("exists") else ""
      -575        kind = f"{sep}{kind}" if kind else ""
      -576        constraints = f" {constraints}" if constraints else ""
      -577        position = self.sql(expression, "position")
      -578        position = f" {position}" if position else ""
      -579
      -580        return f"{exists}{column}{kind}{constraints}{position}"
      +            
      555    def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
      +556        column = self.sql(expression, "this")
      +557        kind = self.sql(expression, "kind")
      +558        constraints = self.expressions(expression, key="constraints", sep=" ", flat=True)
      +559        exists = "IF NOT EXISTS " if expression.args.get("exists") else ""
      +560        kind = f"{sep}{kind}" if kind else ""
      +561        constraints = f" {constraints}" if constraints else ""
      +562        position = self.sql(expression, "position")
      +563        position = f" {position}" if position else ""
      +564
      +565        return f"{exists}{column}{kind}{constraints}{position}"
       
      @@ -6115,10 +6139,10 @@ Default: True
      -
      582    def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str:
      -583        this = self.sql(expression, "this")
      -584        kind_sql = self.sql(expression, "kind").strip()
      -585        return f"CONSTRAINT {this} {kind_sql}" if this else kind_sql
      +            
      567    def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str:
      +568        this = self.sql(expression, "this")
      +569        kind_sql = self.sql(expression, "kind").strip()
      +570        return f"CONSTRAINT {this} {kind_sql}" if this else kind_sql
       
      @@ -6136,8 +6160,8 @@ Default: True
      -
      587    def autoincrementcolumnconstraint_sql(self, _) -> str:
      -588        return self.token_sql(TokenType.AUTO_INCREMENT)
      +            
      572    def autoincrementcolumnconstraint_sql(self, _) -> str:
      +573        return self.token_sql(TokenType.AUTO_INCREMENT)
       
      @@ -6155,13 +6179,13 @@ Default: True
      -
      590    def compresscolumnconstraint_sql(self, expression: exp.CompressColumnConstraint) -> str:
      -591        if isinstance(expression.this, list):
      -592            this = self.wrap(self.expressions(expression, key="this", flat=True))
      -593        else:
      -594            this = self.sql(expression, "this")
      -595
      -596        return f"COMPRESS {this}"
      +            
      575    def compresscolumnconstraint_sql(self, expression: exp.CompressColumnConstraint) -> str:
      +576        if isinstance(expression.this, list):
      +577            this = self.wrap(self.expressions(expression, key="this", flat=True))
      +578        else:
      +579            this = self.sql(expression, "this")
      +580
      +581        return f"COMPRESS {this}"
       
      @@ -6179,38 +6203,38 @@ Default: True
      -
      598    def generatedasidentitycolumnconstraint_sql(
      -599        self, expression: exp.GeneratedAsIdentityColumnConstraint
      -600    ) -> str:
      -601        this = ""
      -602        if expression.this is not None:
      -603            on_null = "ON NULL " if expression.args.get("on_null") else ""
      -604            this = " ALWAYS " if expression.this else f" BY DEFAULT {on_null}"
      +            
      583    def generatedasidentitycolumnconstraint_sql(
      +584        self, expression: exp.GeneratedAsIdentityColumnConstraint
      +585    ) -> str:
      +586        this = ""
      +587        if expression.this is not None:
      +588            on_null = "ON NULL " if expression.args.get("on_null") else ""
      +589            this = " ALWAYS " if expression.this else f" BY DEFAULT {on_null}"
      +590
      +591        start = expression.args.get("start")
      +592        start = f"START WITH {start}" if start else ""
      +593        increment = expression.args.get("increment")
      +594        increment = f" INCREMENT BY {increment}" if increment else ""
      +595        minvalue = expression.args.get("minvalue")
      +596        minvalue = f" MINVALUE {minvalue}" if minvalue else ""
      +597        maxvalue = expression.args.get("maxvalue")
      +598        maxvalue = f" MAXVALUE {maxvalue}" if maxvalue else ""
      +599        cycle = expression.args.get("cycle")
      +600        cycle_sql = ""
      +601
      +602        if cycle is not None:
      +603            cycle_sql = f"{' NO' if not cycle else ''} CYCLE"
      +604            cycle_sql = cycle_sql.strip() if not start and not increment else cycle_sql
       605
      -606        start = expression.args.get("start")
      -607        start = f"START WITH {start}" if start else ""
      -608        increment = expression.args.get("increment")
      -609        increment = f" INCREMENT BY {increment}" if increment else ""
      -610        minvalue = expression.args.get("minvalue")
      -611        minvalue = f" MINVALUE {minvalue}" if minvalue else ""
      -612        maxvalue = expression.args.get("maxvalue")
      -613        maxvalue = f" MAXVALUE {maxvalue}" if maxvalue else ""
      -614        cycle = expression.args.get("cycle")
      -615        cycle_sql = ""
      -616
      -617        if cycle is not None:
      -618            cycle_sql = f"{' NO' if not cycle else ''} CYCLE"
      -619            cycle_sql = cycle_sql.strip() if not start and not increment else cycle_sql
      -620
      -621        sequence_opts = ""
      -622        if start or increment or cycle_sql:
      -623            sequence_opts = f"{start}{increment}{minvalue}{maxvalue}{cycle_sql}"
      -624            sequence_opts = f" ({sequence_opts.strip()})"
      -625
      -626        expr = self.sql(expression, "expression")
      -627        expr = f"({expr})" if expr else "IDENTITY"
      -628
      -629        return f"GENERATED{this}AS {expr}{sequence_opts}"
      +606        sequence_opts = ""
      +607        if start or increment or cycle_sql:
      +608            sequence_opts = f"{start}{increment}{minvalue}{maxvalue}{cycle_sql}"
      +609            sequence_opts = f" ({sequence_opts.strip()})"
      +610
      +611        expr = self.sql(expression, "expression")
      +612        expr = f"({expr})" if expr else "IDENTITY"
      +613
      +614        return f"GENERATED{this}AS {expr}{sequence_opts}"
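A hedged round-trip example touching generatedasidentitycolumnconstraint_sql() above; the DDL text is illustrative only:

```python
import sqlglot

# Illustrative DDL: generation of the identity clause goes through
# generatedasidentitycolumnconstraint_sql().
ddl = "CREATE TABLE t (id INT GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 1))"
print(sqlglot.transpile(ddl)[0])
```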
       
      @@ -6228,8 +6252,8 @@ Default: True
      -
      631    def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str:
      -632        return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL"
      +            
      616    def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str:
      +617        return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL"
       
      @@ -6247,11 +6271,11 @@ Default: True
      -
      634    def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str:
      -635        desc = expression.args.get("desc")
      -636        if desc is not None:
      -637            return f"PRIMARY KEY{' DESC' if desc else ' ASC'}"
      -638        return f"PRIMARY KEY"
      +            
      619    def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str:
      +620        desc = expression.args.get("desc")
      +621        if desc is not None:
      +622            return f"PRIMARY KEY{' DESC' if desc else ' ASC'}"
      +623        return f"PRIMARY KEY"
       
      @@ -6269,10 +6293,31 @@ Default: True
      -
      640    def uniquecolumnconstraint_sql(self, expression: exp.UniqueColumnConstraint) -> str:
      -641        this = self.sql(expression, "this")
      -642        this = f" {this}" if this else ""
      -643        return f"UNIQUE{this}"
      +            
      625    def uniquecolumnconstraint_sql(self, expression: exp.UniqueColumnConstraint) -> str:
      +626        this = self.sql(expression, "this")
      +627        this = f" {this}" if this else ""
      +628        return f"UNIQUE{this}"
      +
      + + + + +
      +
      + +
      + + def + createable_sql( self, expression: sqlglot.expressions.Create, locations: dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]) -> str: + + + +
      + +
      630    def createable_sql(
      +631        self, expression: exp.Create, locations: dict[exp.Properties.Location, list[exp.Property]]
      +632    ) -> str:
      +633        return self.sql(expression, "this")
       
      @@ -6290,99 +6335,90 @@ Default: True
      -
      645    def create_sql(self, expression: exp.Create) -> str:
      -646        kind = self.sql(expression, "kind").upper()
      -647        properties = expression.args.get("properties")
      -648        properties_exp = expression.copy()
      -649        properties_locs = self.locate_properties(properties) if properties else {}
      -650        if properties_locs.get(exp.Properties.Location.POST_SCHEMA) or properties_locs.get(
      -651            exp.Properties.Location.POST_WITH
      -652        ):
      -653            properties_exp.set(
      -654                "properties",
      -655                exp.Properties(
      -656                    expressions=[
      -657                        *properties_locs[exp.Properties.Location.POST_SCHEMA],
      -658                        *properties_locs[exp.Properties.Location.POST_WITH],
      -659                    ]
      -660                ),
      -661            )
      -662        if kind == "TABLE" and properties_locs.get(exp.Properties.Location.POST_NAME):
      -663            this_name = self.sql(expression.this, "this")
      -664            this_properties = self.properties(
      -665                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_NAME]),
      -666                wrapped=False,
      -667            )
      -668            this_schema = f"({self.expressions(expression.this)})"
      -669            this = f"{this_name}, {this_properties} {this_schema}"
      -670            properties_sql = ""
      -671        else:
      -672            this = self.sql(expression, "this")
      -673            properties_sql = self.sql(properties_exp, "properties")
      -674        begin = " BEGIN" if expression.args.get("begin") else ""
      -675        expression_sql = self.sql(expression, "expression")
      -676        if expression_sql:
      -677            expression_sql = f"{begin}{self.sep()}{expression_sql}"
      -678
      -679            if self.CREATE_FUNCTION_RETURN_AS or not isinstance(expression.expression, exp.Return):
      -680                if properties_locs.get(exp.Properties.Location.POST_ALIAS):
      -681                    postalias_props_sql = self.properties(
      -682                        exp.Properties(
      -683                            expressions=properties_locs[exp.Properties.Location.POST_ALIAS]
      -684                        ),
      -685                        wrapped=False,
      -686                    )
      -687                    expression_sql = f" AS {postalias_props_sql}{expression_sql}"
      -688                else:
      -689                    expression_sql = f" AS{expression_sql}"
      -690
      -691        postindex_props_sql = ""
      -692        if properties_locs.get(exp.Properties.Location.POST_INDEX):
      -693            postindex_props_sql = self.properties(
      -694                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_INDEX]),
      -695                wrapped=False,
      -696                prefix=" ",
      -697            )
      -698
      -699        indexes = self.expressions(expression, key="indexes", indent=False, sep=" ")
      -700        indexes = f" {indexes}" if indexes else ""
      -701        index_sql = indexes + postindex_props_sql
      -702
      -703        replace = " OR REPLACE" if expression.args.get("replace") else ""
      -704        unique = " UNIQUE" if expression.args.get("unique") else ""
      -705
      -706        postcreate_props_sql = ""
      -707        if properties_locs.get(exp.Properties.Location.POST_CREATE):
      -708            postcreate_props_sql = self.properties(
      -709                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]),
      -710                sep=" ",
      -711                prefix=" ",
      -712                wrapped=False,
      -713            )
      -714
      -715        modifiers = "".join((replace, unique, postcreate_props_sql))
      +            
      635    def create_sql(self, expression: exp.Create) -> str:
      +636        kind = self.sql(expression, "kind").upper()
      +637        properties = expression.args.get("properties")
      +638        properties_locs = self.locate_properties(properties) if properties else {}
      +639
      +640        this = self.createable_sql(expression, properties_locs)
      +641
      +642        properties_sql = ""
      +643        if properties_locs.get(exp.Properties.Location.POST_SCHEMA) or properties_locs.get(
      +644            exp.Properties.Location.POST_WITH
      +645        ):
      +646            properties_sql = self.sql(
      +647                exp.Properties(
      +648                    expressions=[
      +649                        *properties_locs[exp.Properties.Location.POST_SCHEMA],
      +650                        *properties_locs[exp.Properties.Location.POST_WITH],
      +651                    ]
      +652                )
      +653            )
      +654
      +655        begin = " BEGIN" if expression.args.get("begin") else ""
      +656        expression_sql = self.sql(expression, "expression")
      +657        if expression_sql:
      +658            expression_sql = f"{begin}{self.sep()}{expression_sql}"
      +659
      +660            if self.CREATE_FUNCTION_RETURN_AS or not isinstance(expression.expression, exp.Return):
      +661                if properties_locs.get(exp.Properties.Location.POST_ALIAS):
      +662                    postalias_props_sql = self.properties(
      +663                        exp.Properties(
      +664                            expressions=properties_locs[exp.Properties.Location.POST_ALIAS]
      +665                        ),
      +666                        wrapped=False,
      +667                    )
      +668                    expression_sql = f" AS {postalias_props_sql}{expression_sql}"
      +669                else:
      +670                    expression_sql = f" AS{expression_sql}"
      +671
      +672        postindex_props_sql = ""
      +673        if properties_locs.get(exp.Properties.Location.POST_INDEX):
      +674            postindex_props_sql = self.properties(
      +675                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_INDEX]),
      +676                wrapped=False,
      +677                prefix=" ",
      +678            )
      +679
      +680        indexes = self.expressions(expression, key="indexes", indent=False, sep=" ")
      +681        indexes = f" {indexes}" if indexes else ""
      +682        index_sql = indexes + postindex_props_sql
      +683
      +684        replace = " OR REPLACE" if expression.args.get("replace") else ""
      +685        unique = " UNIQUE" if expression.args.get("unique") else ""
      +686
      +687        postcreate_props_sql = ""
      +688        if properties_locs.get(exp.Properties.Location.POST_CREATE):
      +689            postcreate_props_sql = self.properties(
      +690                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]),
      +691                sep=" ",
      +692                prefix=" ",
      +693                wrapped=False,
      +694            )
      +695
      +696        modifiers = "".join((replace, unique, postcreate_props_sql))
      +697
      +698        postexpression_props_sql = ""
      +699        if properties_locs.get(exp.Properties.Location.POST_EXPRESSION):
      +700            postexpression_props_sql = self.properties(
      +701                exp.Properties(
      +702                    expressions=properties_locs[exp.Properties.Location.POST_EXPRESSION]
      +703                ),
      +704                sep=" ",
      +705                prefix=" ",
      +706                wrapped=False,
      +707            )
      +708
      +709        exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else ""
      +710        no_schema_binding = (
      +711            " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else ""
      +712        )
      +713
      +714        clone = self.sql(expression, "clone")
      +715        clone = f" {clone}" if clone else ""
       716
      -717        postexpression_props_sql = ""
      -718        if properties_locs.get(exp.Properties.Location.POST_EXPRESSION):
      -719            postexpression_props_sql = self.properties(
      -720                exp.Properties(
      -721                    expressions=properties_locs[exp.Properties.Location.POST_EXPRESSION]
      -722                ),
      -723                sep=" ",
      -724                prefix=" ",
      -725                wrapped=False,
      -726            )
      -727
      -728        exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else ""
      -729        no_schema_binding = (
      -730            " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else ""
      -731        )
      -732
      -733        clone = self.sql(expression, "clone")
      -734        clone = f" {clone}" if clone else ""
      -735
      -736        expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{postexpression_props_sql}{index_sql}{no_schema_binding}{clone}"
      -737        return self.prepend_ctes(expression, expression_sql)
      +717        expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{postexpression_props_sql}{index_sql}{no_schema_binding}{clone}"
      +718        return self.prepend_ctes(expression, expression_sql)
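For context, the refactored create_sql/createable_sql pair above is normally exercised through the public round-trip API; a minimal sketch (illustrative statement, not taken from the upstream tests):

    import sqlglot

    # Parsing and regenerating routes through Generator.create_sql; dialect
    # generators can now override createable_sql to customize how the created
    # object itself is rendered instead of replacing the whole method.
    expr = sqlglot.parse_one("CREATE TABLE IF NOT EXISTS db.t (a INT)")
    print(expr.sql())  # expected round trip: CREATE TABLE IF NOT EXISTS db.t (a INT)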
       
      @@ -6400,16 +6436,16 @@ Default: True
      -
      739    def clone_sql(self, expression: exp.Clone) -> str:
      -740        this = self.sql(expression, "this")
      -741        when = self.sql(expression, "when")
      -742
      -743        if when:
      -744            kind = self.sql(expression, "kind")
      -745            expr = self.sql(expression, "expression")
      -746            return f"CLONE {this} {when} ({kind} => {expr})"
      -747
      -748        return f"CLONE {this}"
      +            
      720    def clone_sql(self, expression: exp.Clone) -> str:
      +721        this = self.sql(expression, "this")
      +722        when = self.sql(expression, "when")
      +723
      +724        if when:
      +725            kind = self.sql(expression, "kind")
      +726            expr = self.sql(expression, "expression")
      +727            return f"CLONE {this} {when} ({kind} => {expr})"
      +728
      +729        return f"CLONE {this}"
       
      @@ -6427,8 +6463,8 @@ Default: True
      -
      750    def describe_sql(self, expression: exp.Describe) -> str:
      -751        return f"DESCRIBE {self.sql(expression, 'this')}"
      +            
      731    def describe_sql(self, expression: exp.Describe) -> str:
      +732        return f"DESCRIBE {self.sql(expression, 'this')}"
       
      @@ -6446,11 +6482,11 @@ Default: True
      -
      753    def prepend_ctes(self, expression: exp.Expression, sql: str) -> str:
      -754        with_ = self.sql(expression, "with")
      -755        if with_:
      -756            sql = f"{with_}{self.sep()}{sql}"
      -757        return sql
      +            
      734    def prepend_ctes(self, expression: exp.Expression, sql: str) -> str:
      +735        with_ = self.sql(expression, "with")
      +736        if with_:
      +737            sql = f"{with_}{self.sep()}{sql}"
      +738        return sql
       
      @@ -6468,11 +6504,11 @@ Default: True
      -
      759    def with_sql(self, expression: exp.With) -> str:
      -760        sql = self.expressions(expression, flat=True)
      -761        recursive = "RECURSIVE " if expression.args.get("recursive") else ""
      -762
      -763        return f"WITH {recursive}{sql}"
      +            
      740    def with_sql(self, expression: exp.With) -> str:
      +741        sql = self.expressions(expression, flat=True)
      +742        recursive = "RECURSIVE " if expression.args.get("recursive") else ""
      +743
      +744        return f"WITH {recursive}{sql}"
       
      @@ -6490,9 +6526,9 @@ Default: True
      -
      765    def cte_sql(self, expression: exp.CTE) -> str:
      -766        alias = self.sql(expression, "alias")
      -767        return f"{alias} AS {self.wrap(expression)}"
      +            
      746    def cte_sql(self, expression: exp.CTE) -> str:
      +747        alias = self.sql(expression, "alias")
      +748        return f"{alias} AS {self.wrap(expression)}"
       
      @@ -6510,11 +6546,11 @@ Default: True
      -
      769    def tablealias_sql(self, expression: exp.TableAlias) -> str:
      -770        alias = self.sql(expression, "this")
      -771        columns = self.expressions(expression, key="columns", flat=True)
      -772        columns = f"({columns})" if columns else ""
      -773        return f"{alias}{columns}"
      +            
      750    def tablealias_sql(self, expression: exp.TableAlias) -> str:
      +751        alias = self.sql(expression, "this")
      +752        columns = self.expressions(expression, key="columns", flat=True)
      +753        columns = f"({columns})" if columns else ""
      +754        return f"{alias}{columns}"
       
      @@ -6532,11 +6568,11 @@ Default: True
      -
      775    def bitstring_sql(self, expression: exp.BitString) -> str:
      -776        this = self.sql(expression, "this")
      -777        if self.bit_start:
      -778            return f"{self.bit_start}{this}{self.bit_end}"
      -779        return f"{int(this, 2)}"
      +            
      756    def bitstring_sql(self, expression: exp.BitString) -> str:
      +757        this = self.sql(expression, "this")
      +758        if self.BIT_START:
      +759            return f"{self.BIT_START}{this}{self.BIT_END}"
      +760        return f"{int(this, 2)}"
       
      @@ -6554,11 +6590,11 @@ Default: True
      -
      781    def hexstring_sql(self, expression: exp.HexString) -> str:
      -782        this = self.sql(expression, "this")
      -783        if self.hex_start:
      -784            return f"{self.hex_start}{this}{self.hex_end}"
      -785        return f"{int(this, 16)}"
      +            
      762    def hexstring_sql(self, expression: exp.HexString) -> str:
      +763        this = self.sql(expression, "this")
      +764        if self.HEX_START:
      +765            return f"{self.HEX_START}{this}{self.HEX_END}"
      +766        return f"{int(this, 16)}"
       
      @@ -6576,11 +6612,11 @@ Default: True
      -
      787    def bytestring_sql(self, expression: exp.ByteString) -> str:
      -788        this = self.sql(expression, "this")
      -789        if self.byte_start:
      -790            return f"{self.byte_start}{this}{self.byte_end}"
      -791        return this
      +            
      768    def bytestring_sql(self, expression: exp.ByteString) -> str:
      +769        this = self.sql(expression, "this")
      +770        if self.BYTE_START:
      +771            return f"{self.BYTE_START}{this}{self.BYTE_END}"
      +772        return this
       
      @@ -6598,10 +6634,10 @@ Default: True
      -
      793    def rawstring_sql(self, expression: exp.RawString) -> str:
      -794        if self.raw_start:
      -795            return f"{self.raw_start}{expression.name}{self.raw_end}"
      -796        return self.sql(exp.Literal.string(expression.name.replace("\\", "\\\\")))
      +            
      774    def rawstring_sql(self, expression: exp.RawString) -> str:
      +775        if self.RAW_START:
      +776            return f"{self.RAW_START}{expression.name}{self.RAW_END}"
      +777        return self.sql(exp.Literal.string(expression.name.replace("\\", "\\\\")))
       
      @@ -6619,11 +6655,11 @@ Default: True
      -
      798    def datatypesize_sql(self, expression: exp.DataTypeSize) -> str:
      -799        this = self.sql(expression, "this")
      -800        specifier = self.sql(expression, "expression")
      -801        specifier = f" {specifier}" if specifier else ""
      -802        return f"{this}{specifier}"
      +            
      779    def datatypesize_sql(self, expression: exp.DataTypeSize) -> str:
      +780        this = self.sql(expression, "this")
      +781        specifier = self.sql(expression, "expression")
      +782        specifier = f" {specifier}" if specifier else ""
      +783        return f"{this}{specifier}"
       
      @@ -6641,23 +6677,23 @@ Default: True
      -
      804    def datatype_sql(self, expression: exp.DataType) -> str:
      -805        type_value = expression.this
      -806        type_sql = self.TYPE_MAPPING.get(type_value, type_value.value)
      -807        nested = ""
      -808        interior = self.expressions(expression, flat=True)
      -809        values = ""
      -810        if interior:
      -811            if expression.args.get("nested"):
      -812                nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}"
      -813                if expression.args.get("values") is not None:
      -814                    delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")")
      -815                    values = self.expressions(expression, key="values", flat=True)
      -816                    values = f"{delimiters[0]}{values}{delimiters[1]}"
      -817            else:
      -818                nested = f"({interior})"
      -819
      -820        return f"{type_sql}{nested}{values}"
      +            
      785    def datatype_sql(self, expression: exp.DataType) -> str:
      +786        type_value = expression.this
      +787        type_sql = self.TYPE_MAPPING.get(type_value, type_value.value)
      +788        nested = ""
      +789        interior = self.expressions(expression, flat=True)
      +790        values = ""
      +791        if interior:
      +792            if expression.args.get("nested"):
      +793                nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}"
      +794                if expression.args.get("values") is not None:
      +795                    delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")")
      +796                    values = self.expressions(expression, key="values", flat=True)
      +797                    values = f"{delimiters[0]}{values}{delimiters[1]}"
      +798            else:
      +799                nested = f"({interior})"
      +800
      +801        return f"{type_sql}{nested}{values}"
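The nested/values handling in datatype_sql can be seen by building a DataType node directly; a small illustration using the public expressions module (output shown as expected, may vary by version):

    from sqlglot import exp

    # ARRAY<INT> is a "nested" type, so datatype_sql wraps the interior in
    # STRUCT_DELIMITER ("<" and ">" by default); sized scalar types fall into
    # the else branch and get plain parentheses.
    print(exp.DataType.build("ARRAY<INT>").sql())      # ARRAY<INT>
    print(exp.DataType.build("DECIMAL(10, 2)").sql())  # DECIMAL(10, 2)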
       
      @@ -6675,11 +6711,11 @@ Default: True
      -
      822    def directory_sql(self, expression: exp.Directory) -> str:
      -823        local = "LOCAL " if expression.args.get("local") else ""
      -824        row_format = self.sql(expression, "row_format")
      -825        row_format = f" {row_format}" if row_format else ""
      -826        return f"{local}DIRECTORY {self.sql(expression, 'this')}{row_format}"
      +            
      803    def directory_sql(self, expression: exp.Directory) -> str:
      +804        local = "LOCAL " if expression.args.get("local") else ""
      +805        row_format = self.sql(expression, "row_format")
      +806        row_format = f" {row_format}" if row_format else ""
      +807        return f"{local}DIRECTORY {self.sql(expression, 'this')}{row_format}"
       
      @@ -6697,18 +6733,18 @@ Default: True
      -
      828    def delete_sql(self, expression: exp.Delete) -> str:
      -829        this = self.sql(expression, "this")
      -830        this = f" FROM {this}" if this else ""
      -831        using_sql = (
      -832            f" USING {self.expressions(expression, key='using', sep=', USING ')}"
      -833            if expression.args.get("using")
      -834            else ""
      -835        )
      -836        where_sql = self.sql(expression, "where")
      -837        returning = self.sql(expression, "returning")
      -838        sql = f"DELETE{this}{using_sql}{where_sql}{returning}"
      -839        return self.prepend_ctes(expression, sql)
      +            
      809    def delete_sql(self, expression: exp.Delete) -> str:
      +810        this = self.sql(expression, "this")
      +811        this = f" FROM {this}" if this else ""
      +812        using_sql = (
      +813            f" USING {self.expressions(expression, key='using', sep=', USING ')}"
      +814            if expression.args.get("using")
      +815            else ""
      +816        )
      +817        where_sql = self.sql(expression, "where")
      +818        returning = self.sql(expression, "returning")
      +819        sql = f"DELETE{this}{using_sql}{where_sql}{returning}"
      +820        return self.prepend_ctes(expression, sql)
       
      @@ -6726,18 +6762,18 @@ Default: True
      -
      841    def drop_sql(self, expression: exp.Drop) -> str:
      -842        this = self.sql(expression, "this")
      -843        kind = expression.args["kind"]
      -844        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
      -845        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
      -846        materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
      -847        cascade = " CASCADE" if expression.args.get("cascade") else ""
      -848        constraints = " CONSTRAINTS" if expression.args.get("constraints") else ""
      -849        purge = " PURGE" if expression.args.get("purge") else ""
      -850        return (
      -851            f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{cascade}{constraints}{purge}"
      -852        )
      +            
      822    def drop_sql(self, expression: exp.Drop) -> str:
      +823        this = self.sql(expression, "this")
      +824        kind = expression.args["kind"]
      +825        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
      +826        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
      +827        materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
      +828        cascade = " CASCADE" if expression.args.get("cascade") else ""
      +829        constraints = " CONSTRAINTS" if expression.args.get("constraints") else ""
      +830        purge = " PURGE" if expression.args.get("purge") else ""
      +831        return (
      +832            f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{cascade}{constraints}{purge}"
      +833        )
       
      @@ -6755,11 +6791,11 @@ Default: True
      -
      854    def except_sql(self, expression: exp.Except) -> str:
      -855        return self.prepend_ctes(
      -856            expression,
      -857            self.set_operation(expression, self.except_op(expression)),
      -858        )
      +            
      835    def except_sql(self, expression: exp.Except) -> str:
      +836        return self.prepend_ctes(
      +837            expression,
      +838            self.set_operation(expression, self.except_op(expression)),
      +839        )
       
      @@ -6777,8 +6813,8 @@ Default: True
      -
      860    def except_op(self, expression: exp.Except) -> str:
      -861        return f"EXCEPT{'' if expression.args.get('distinct') else ' ALL'}"
      +            
      841    def except_op(self, expression: exp.Except) -> str:
      +842        return f"EXCEPT{'' if expression.args.get('distinct') else ' ALL'}"
       
      @@ -6796,15 +6832,15 @@ Default: True
      -
      863    def fetch_sql(self, expression: exp.Fetch) -> str:
      -864        direction = expression.args.get("direction")
      -865        direction = f" {direction.upper()}" if direction else ""
      -866        count = expression.args.get("count")
      -867        count = f" {count}" if count else ""
      -868        if expression.args.get("percent"):
      -869            count = f"{count} PERCENT"
      -870        with_ties_or_only = "WITH TIES" if expression.args.get("with_ties") else "ONLY"
      -871        return f"{self.seg('FETCH')}{direction}{count} ROWS {with_ties_or_only}"
      +            
      844    def fetch_sql(self, expression: exp.Fetch) -> str:
      +845        direction = expression.args.get("direction")
      +846        direction = f" {direction.upper()}" if direction else ""
      +847        count = expression.args.get("count")
      +848        count = f" {count}" if count else ""
      +849        if expression.args.get("percent"):
      +850            count = f"{count} PERCENT"
      +851        with_ties_or_only = "WITH TIES" if expression.args.get("with_ties") else "ONLY"
      +852        return f"{self.seg('FETCH')}{direction}{count} ROWS {with_ties_or_only}"
       
      @@ -6822,10 +6858,10 @@ Default: True
      -
      873    def filter_sql(self, expression: exp.Filter) -> str:
      -874        this = self.sql(expression, "this")
      -875        where = self.sql(expression, "expression")[1:]  # where has a leading space
      -876        return f"{this} FILTER({where})"
      +            
      854    def filter_sql(self, expression: exp.Filter) -> str:
      +855        this = self.sql(expression, "this")
      +856        where = self.sql(expression, "expression")[1:]  # where has a leading space
      +857        return f"{this} FILTER({where})"
       
      @@ -6843,10 +6879,10 @@ Default: True
      -
      878    def hint_sql(self, expression: exp.Hint) -> str:
      -879        if self.sql(expression, "this"):
      -880            self.unsupported("Hints are not supported")
      -881        return ""
      +            
      859    def hint_sql(self, expression: exp.Hint) -> str:
      +860        if self.sql(expression, "this"):
      +861            self.unsupported("Hints are not supported")
      +862        return ""
       
      @@ -6864,18 +6900,21 @@ Default: True
      -
      883    def index_sql(self, expression: exp.Index) -> str:
      -884        unique = "UNIQUE " if expression.args.get("unique") else ""
      -885        primary = "PRIMARY " if expression.args.get("primary") else ""
      -886        amp = "AMP " if expression.args.get("amp") else ""
      -887        name = f"{expression.name} " if expression.name else ""
      -888        table = self.sql(expression, "table")
      -889        table = f"{self.INDEX_ON} {table} " if table else ""
      -890        index = "INDEX " if not table else ""
      -891        columns = self.expressions(expression, key="columns", flat=True)
      -892        partition_by = self.expressions(expression, key="partition_by", flat=True)
      -893        partition_by = f" PARTITION BY {partition_by}" if partition_by else ""
      -894        return f"{unique}{primary}{amp}{index}{name}{table}({columns}){partition_by}"
      +            
      864    def index_sql(self, expression: exp.Index) -> str:
      +865        unique = "UNIQUE " if expression.args.get("unique") else ""
      +866        primary = "PRIMARY " if expression.args.get("primary") else ""
      +867        amp = "AMP " if expression.args.get("amp") else ""
      +868        name = f"{expression.name} " if expression.name else ""
      +869        table = self.sql(expression, "table")
      +870        table = f"{self.INDEX_ON} {table} " if table else ""
      +871        using = self.sql(expression, "using")
      +872        using = f"USING {using} " if using else ""
      +873        index = "INDEX " if not table else ""
      +874        columns = self.expressions(expression, key="columns", flat=True)
      +875        columns = f"({columns})" if columns else ""
      +876        partition_by = self.expressions(expression, key="partition_by", flat=True)
      +877        partition_by = f" PARTITION BY {partition_by}" if partition_by else ""
      +878        return f"{unique}{primary}{amp}{index}{name}{table}{using}{columns}{partition_by}"
       
      @@ -6893,19 +6932,19 @@ Default: True
      -
      896    def identifier_sql(self, expression: exp.Identifier) -> str:
      -897        text = expression.name
      -898        lower = text.lower()
      -899        text = lower if self.normalize and not expression.quoted else text
      -900        text = text.replace(self.identifier_end, self._escaped_identifier_end)
      -901        if (
      -902            expression.quoted
      -903            or should_identify(text, self.identify)
      -904            or lower in self.RESERVED_KEYWORDS
      -905            or (not self.identifiers_can_start_with_digit and text[:1].isdigit())
      -906        ):
      -907            text = f"{self.identifier_start}{text}{self.identifier_end}"
      -908        return text
      +            
      880    def identifier_sql(self, expression: exp.Identifier) -> str:
      +881        text = expression.name
      +882        lower = text.lower()
      +883        text = lower if self.normalize and not expression.quoted else text
      +884        text = text.replace(self.IDENTIFIER_END, self._escaped_identifier_end)
      +885        if (
      +886            expression.quoted
      +887            or should_identify(text, self.identify)
      +888            or lower in self.RESERVED_KEYWORDS
      +889            or (not self.IDENTIFIERS_CAN_START_WITH_DIGIT and text[:1].isdigit())
      +890        ):
      +891            text = f"{self.IDENTIFIER_START}{text}{self.IDENTIFIER_END}"
      +892        return text
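identifier_sql is what the identify and normalize generation options feed into; a quick sketch via the top-level API (flag name as documented upstream, output shown as expected):

    import sqlglot

    # identify=True forces every identifier through the quoting branch of
    # identifier_sql, regardless of reserved keywords or casing.
    print(sqlglot.transpile("SELECT a FROM t", identify=True)[0])
    # -> SELECT "a" FROM "t"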
       
      @@ -6923,12 +6962,12 @@ Default: True
      -
      910    def inputoutputformat_sql(self, expression: exp.InputOutputFormat) -> str:
      -911        input_format = self.sql(expression, "input_format")
      -912        input_format = f"INPUTFORMAT {input_format}" if input_format else ""
      -913        output_format = self.sql(expression, "output_format")
      -914        output_format = f"OUTPUTFORMAT {output_format}" if output_format else ""
      -915        return self.sep().join((input_format, output_format))
      +            
      894    def inputoutputformat_sql(self, expression: exp.InputOutputFormat) -> str:
      +895        input_format = self.sql(expression, "input_format")
      +896        input_format = f"INPUTFORMAT {input_format}" if input_format else ""
      +897        output_format = self.sql(expression, "output_format")
      +898        output_format = f"OUTPUTFORMAT {output_format}" if output_format else ""
      +899        return self.sep().join((input_format, output_format))
       
      @@ -6946,9 +6985,9 @@ Default: True
      -
      917    def national_sql(self, expression: exp.National, prefix: str = "N") -> str:
      -918        string = self.sql(exp.Literal.string(expression.name))
      -919        return f"{prefix}{string}"
      +            
      901    def national_sql(self, expression: exp.National, prefix: str = "N") -> str:
      +902        string = self.sql(exp.Literal.string(expression.name))
      +903        return f"{prefix}{string}"
       
      @@ -6966,8 +7005,8 @@ Default: True
      -
      921    def partition_sql(self, expression: exp.Partition) -> str:
      -922        return f"PARTITION({self.expressions(expression)})"
      +            
      905    def partition_sql(self, expression: exp.Partition) -> str:
      +906        return f"PARTITION({self.expressions(expression)})"
       
      @@ -6985,20 +7024,20 @@ Default: True
      -
      924    def properties_sql(self, expression: exp.Properties) -> str:
      -925        root_properties = []
      -926        with_properties = []
      -927
      -928        for p in expression.expressions:
      -929            p_loc = self.PROPERTIES_LOCATION[p.__class__]
      -930            if p_loc == exp.Properties.Location.POST_WITH:
      -931                with_properties.append(p)
      -932            elif p_loc == exp.Properties.Location.POST_SCHEMA:
      -933                root_properties.append(p)
      -934
      -935        return self.root_properties(
      -936            exp.Properties(expressions=root_properties)
      -937        ) + self.with_properties(exp.Properties(expressions=with_properties))
      +            
      908    def properties_sql(self, expression: exp.Properties) -> str:
      +909        root_properties = []
      +910        with_properties = []
      +911
      +912        for p in expression.expressions:
      +913            p_loc = self.PROPERTIES_LOCATION[p.__class__]
      +914            if p_loc == exp.Properties.Location.POST_WITH:
      +915                with_properties.append(p)
      +916            elif p_loc == exp.Properties.Location.POST_SCHEMA:
      +917                root_properties.append(p)
      +918
      +919        return self.root_properties(
      +920            exp.Properties(expressions=root_properties)
      +921        ) + self.with_properties(exp.Properties(expressions=with_properties))
       
      @@ -7016,10 +7055,10 @@ Default: True
      -
      939    def root_properties(self, properties: exp.Properties) -> str:
      -940        if properties.expressions:
      -941            return self.sep() + self.expressions(properties, indent=False, sep=" ")
      -942        return ""
      +            
      923    def root_properties(self, properties: exp.Properties) -> str:
      +924        if properties.expressions:
      +925            return self.sep() + self.expressions(properties, indent=False, sep=" ")
      +926        return ""
       
      @@ -7037,19 +7076,19 @@ Default: True
      -
      944    def properties(
      -945        self,
      -946        properties: exp.Properties,
      -947        prefix: str = "",
      -948        sep: str = ", ",
      -949        suffix: str = "",
      -950        wrapped: bool = True,
      -951    ) -> str:
      -952        if properties.expressions:
      -953            expressions = self.expressions(properties, sep=sep, indent=False)
      -954            expressions = self.wrap(expressions) if wrapped else expressions
      -955            return f"{prefix}{' ' if prefix and prefix != ' ' else ''}{expressions}{suffix}"
      -956        return ""
      +            
      928    def properties(
      +929        self,
      +930        properties: exp.Properties,
      +931        prefix: str = "",
      +932        sep: str = ", ",
      +933        suffix: str = "",
      +934        wrapped: bool = True,
      +935    ) -> str:
      +936        if properties.expressions:
      +937            expressions = self.expressions(properties, sep=sep, indent=False)
      +938            expressions = self.wrap(expressions) if wrapped else expressions
      +939            return f"{prefix}{' ' if prefix and prefix != ' ' else ''}{expressions}{suffix}"
      +940        return ""
       
      @@ -7067,8 +7106,8 @@ Default: True
      -
      958    def with_properties(self, properties: exp.Properties) -> str:
      -959        return self.properties(properties, prefix=self.seg("WITH"))
      +            
      942    def with_properties(self, properties: exp.Properties) -> str:
      +943        return self.properties(properties, prefix=self.seg("WITH"))
       
      @@ -7086,33 +7125,33 @@ Default: True
      -
      961    def locate_properties(
      -962        self, properties: exp.Properties
      -963    ) -> t.Dict[exp.Properties.Location, list[exp.Property]]:
      -964        properties_locs: t.Dict[exp.Properties.Location, list[exp.Property]] = {
      -965            key: [] for key in exp.Properties.Location
      -966        }
      -967
      -968        for p in properties.expressions:
      -969            p_loc = self.PROPERTIES_LOCATION[p.__class__]
      -970            if p_loc == exp.Properties.Location.POST_NAME:
      -971                properties_locs[exp.Properties.Location.POST_NAME].append(p)
      -972            elif p_loc == exp.Properties.Location.POST_INDEX:
      -973                properties_locs[exp.Properties.Location.POST_INDEX].append(p)
      -974            elif p_loc == exp.Properties.Location.POST_SCHEMA:
      -975                properties_locs[exp.Properties.Location.POST_SCHEMA].append(p)
      -976            elif p_loc == exp.Properties.Location.POST_WITH:
      -977                properties_locs[exp.Properties.Location.POST_WITH].append(p)
      -978            elif p_loc == exp.Properties.Location.POST_CREATE:
      -979                properties_locs[exp.Properties.Location.POST_CREATE].append(p)
      -980            elif p_loc == exp.Properties.Location.POST_ALIAS:
      -981                properties_locs[exp.Properties.Location.POST_ALIAS].append(p)
      -982            elif p_loc == exp.Properties.Location.POST_EXPRESSION:
      -983                properties_locs[exp.Properties.Location.POST_EXPRESSION].append(p)
      -984            elif p_loc == exp.Properties.Location.UNSUPPORTED:
      -985                self.unsupported(f"Unsupported property {p.key}")
      -986
      -987        return properties_locs
      +            
      945    def locate_properties(
      +946        self, properties: exp.Properties
      +947    ) -> t.Dict[exp.Properties.Location, list[exp.Property]]:
      +948        properties_locs: t.Dict[exp.Properties.Location, list[exp.Property]] = {
      +949            key: [] for key in exp.Properties.Location
      +950        }
      +951
      +952        for p in properties.expressions:
      +953            p_loc = self.PROPERTIES_LOCATION[p.__class__]
      +954            if p_loc == exp.Properties.Location.POST_NAME:
      +955                properties_locs[exp.Properties.Location.POST_NAME].append(p)
      +956            elif p_loc == exp.Properties.Location.POST_INDEX:
      +957                properties_locs[exp.Properties.Location.POST_INDEX].append(p)
      +958            elif p_loc == exp.Properties.Location.POST_SCHEMA:
      +959                properties_locs[exp.Properties.Location.POST_SCHEMA].append(p)
      +960            elif p_loc == exp.Properties.Location.POST_WITH:
      +961                properties_locs[exp.Properties.Location.POST_WITH].append(p)
      +962            elif p_loc == exp.Properties.Location.POST_CREATE:
      +963                properties_locs[exp.Properties.Location.POST_CREATE].append(p)
      +964            elif p_loc == exp.Properties.Location.POST_ALIAS:
      +965                properties_locs[exp.Properties.Location.POST_ALIAS].append(p)
      +966            elif p_loc == exp.Properties.Location.POST_EXPRESSION:
      +967                properties_locs[exp.Properties.Location.POST_EXPRESSION].append(p)
      +968            elif p_loc == exp.Properties.Location.UNSUPPORTED:
      +969                self.unsupported(f"Unsupported property {p.key}")
      +970
      +971        return properties_locs
       
      @@ -7130,16 +7169,16 @@ Default: True
      -
      989    def property_sql(self, expression: exp.Property) -> str:
      -990        property_cls = expression.__class__
      -991        if property_cls == exp.Property:
      -992            return f"{expression.name}={self.sql(expression, 'value')}"
      -993
      -994        property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls)
      -995        if not property_name:
      -996            self.unsupported(f"Unsupported property {expression.key}")
      -997
      -998        return f"{property_name}={self.sql(expression, 'this')}"
      +            
      973    def property_sql(self, expression: exp.Property) -> str:
      +974        property_cls = expression.__class__
      +975        if property_cls == exp.Property:
      +976            return f"{expression.name}={self.sql(expression, 'value')}"
      +977
      +978        property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls)
      +979        if not property_name:
      +980            self.unsupported(f"Unsupported property {expression.key}")
      +981
      +982        return f"{property_name}={self.sql(expression, 'this')}"
       
      @@ -7157,10 +7196,10 @@ Default: True
      -
      1000    def likeproperty_sql(self, expression: exp.LikeProperty) -> str:
      -1001        options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions)
      -1002        options = f" {options}" if options else ""
      -1003        return f"LIKE {self.sql(expression, 'this')}{options}"
      +            
      984    def likeproperty_sql(self, expression: exp.LikeProperty) -> str:
      +985        options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions)
      +986        options = f" {options}" if options else ""
      +987        return f"LIKE {self.sql(expression, 'this')}{options}"
       
      @@ -7178,10 +7217,10 @@ Default: True
      -
      1005    def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str:
      -1006        no = "NO " if expression.args.get("no") else ""
      -1007        protection = " PROTECTION" if expression.args.get("protection") else ""
      -1008        return f"{no}FALLBACK{protection}"
      +            
      989    def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str:
      +990        no = "NO " if expression.args.get("no") else ""
      +991        protection = " PROTECTION" if expression.args.get("protection") else ""
      +992        return f"{no}FALLBACK{protection}"
       
      @@ -7199,14 +7238,14 @@ Default: True
      -
      1010    def journalproperty_sql(self, expression: exp.JournalProperty) -> str:
      -1011        no = "NO " if expression.args.get("no") else ""
      -1012        local = expression.args.get("local")
      -1013        local = f"{local} " if local else ""
      -1014        dual = "DUAL " if expression.args.get("dual") else ""
      -1015        before = "BEFORE " if expression.args.get("before") else ""
      -1016        after = "AFTER " if expression.args.get("after") else ""
      -1017        return f"{no}{local}{dual}{before}{after}JOURNAL"
      +            
+ 994    def journalproperty_sql(self, expression: exp.JournalProperty) -> str:
      + 995        no = "NO " if expression.args.get("no") else ""
      + 996        local = expression.args.get("local")
      + 997        local = f"{local} " if local else ""
      + 998        dual = "DUAL " if expression.args.get("dual") else ""
      + 999        before = "BEFORE " if expression.args.get("before") else ""
      +1000        after = "AFTER " if expression.args.get("after") else ""
      +1001        return f"{no}{local}{dual}{before}{after}JOURNAL"
       
      @@ -7224,10 +7263,10 @@ Default: True
      -
      1019    def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str:
      -1020        freespace = self.sql(expression, "this")
      -1021        percent = " PERCENT" if expression.args.get("percent") else ""
      -1022        return f"FREESPACE={freespace}{percent}"
      +            
      1003    def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str:
      +1004        freespace = self.sql(expression, "this")
      +1005        percent = " PERCENT" if expression.args.get("percent") else ""
      +1006        return f"FREESPACE={freespace}{percent}"
       
      @@ -7245,14 +7284,14 @@ Default: True
      -
      1024    def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str:
      -1025        if expression.args.get("default"):
      -1026            property = "DEFAULT"
      -1027        elif expression.args.get("on"):
      -1028            property = "ON"
      -1029        else:
      -1030            property = "OFF"
      -1031        return f"CHECKSUM={property}"
      +            
      1008    def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str:
      +1009        if expression.args.get("default"):
      +1010            property = "DEFAULT"
      +1011        elif expression.args.get("on"):
      +1012            property = "ON"
      +1013        else:
      +1014            property = "OFF"
      +1015        return f"CHECKSUM={property}"
       
      @@ -7270,14 +7309,14 @@ Default: True
      -
      1033    def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str:
      -1034        if expression.args.get("no"):
      -1035            return "NO MERGEBLOCKRATIO"
      -1036        if expression.args.get("default"):
      -1037            return "DEFAULT MERGEBLOCKRATIO"
      -1038
      -1039        percent = " PERCENT" if expression.args.get("percent") else ""
      -1040        return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}"
      +            
      1017    def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str:
      +1018        if expression.args.get("no"):
      +1019            return "NO MERGEBLOCKRATIO"
      +1020        if expression.args.get("default"):
      +1021            return "DEFAULT MERGEBLOCKRATIO"
      +1022
      +1023        percent = " PERCENT" if expression.args.get("percent") else ""
      +1024        return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}"
       
      @@ -7295,21 +7334,21 @@ Default: True
      -
      1042    def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str:
      -1043        default = expression.args.get("default")
      -1044        minimum = expression.args.get("minimum")
      -1045        maximum = expression.args.get("maximum")
      -1046        if default or minimum or maximum:
      -1047            if default:
      -1048                prop = "DEFAULT"
      -1049            elif minimum:
      -1050                prop = "MINIMUM"
      -1051            else:
      -1052                prop = "MAXIMUM"
      -1053            return f"{prop} DATABLOCKSIZE"
      -1054        units = expression.args.get("units")
      -1055        units = f" {units}" if units else ""
      -1056        return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}"
      +            
      1026    def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str:
      +1027        default = expression.args.get("default")
      +1028        minimum = expression.args.get("minimum")
      +1029        maximum = expression.args.get("maximum")
      +1030        if default or minimum or maximum:
      +1031            if default:
      +1032                prop = "DEFAULT"
      +1033            elif minimum:
      +1034                prop = "MINIMUM"
      +1035            else:
      +1036                prop = "MAXIMUM"
      +1037            return f"{prop} DATABLOCKSIZE"
      +1038        units = expression.args.get("units")
      +1039        units = f" {units}" if units else ""
      +1040        return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}"
       
      @@ -7327,24 +7366,24 @@ Default: True
      -
      1058    def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str:
      -1059        autotemp = expression.args.get("autotemp")
      -1060        always = expression.args.get("always")
      -1061        default = expression.args.get("default")
      -1062        manual = expression.args.get("manual")
      -1063        never = expression.args.get("never")
      -1064
      -1065        if autotemp is not None:
      -1066            prop = f"AUTOTEMP({self.expressions(autotemp)})"
      -1067        elif always:
      -1068            prop = "ALWAYS"
      -1069        elif default:
      -1070            prop = "DEFAULT"
      -1071        elif manual:
      -1072            prop = "MANUAL"
      -1073        elif never:
      -1074            prop = "NEVER"
      -1075        return f"BLOCKCOMPRESSION={prop}"
      +            
      1042    def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str:
      +1043        autotemp = expression.args.get("autotemp")
      +1044        always = expression.args.get("always")
      +1045        default = expression.args.get("default")
      +1046        manual = expression.args.get("manual")
      +1047        never = expression.args.get("never")
      +1048
      +1049        if autotemp is not None:
      +1050            prop = f"AUTOTEMP({self.expressions(autotemp)})"
      +1051        elif always:
      +1052            prop = "ALWAYS"
      +1053        elif default:
      +1054            prop = "DEFAULT"
      +1055        elif manual:
      +1056            prop = "MANUAL"
      +1057        elif never:
      +1058            prop = "NEVER"
      +1059        return f"BLOCKCOMPRESSION={prop}"
       
      @@ -7362,20 +7401,20 @@ Default: True
      -
      1077    def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str:
      -1078        no = expression.args.get("no")
      -1079        no = " NO" if no else ""
      -1080        concurrent = expression.args.get("concurrent")
      -1081        concurrent = " CONCURRENT" if concurrent else ""
      -1082
      -1083        for_ = ""
      -1084        if expression.args.get("for_all"):
      -1085            for_ = " FOR ALL"
      -1086        elif expression.args.get("for_insert"):
      -1087            for_ = " FOR INSERT"
      -1088        elif expression.args.get("for_none"):
      -1089            for_ = " FOR NONE"
      -1090        return f"WITH{no}{concurrent} ISOLATED LOADING{for_}"
      +            
      1061    def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str:
      +1062        no = expression.args.get("no")
      +1063        no = " NO" if no else ""
      +1064        concurrent = expression.args.get("concurrent")
      +1065        concurrent = " CONCURRENT" if concurrent else ""
      +1066
      +1067        for_ = ""
      +1068        if expression.args.get("for_all"):
      +1069            for_ = " FOR ALL"
      +1070        elif expression.args.get("for_insert"):
      +1071            for_ = " FOR INSERT"
      +1072        elif expression.args.get("for_none"):
      +1073            for_ = " FOR NONE"
      +1074        return f"WITH{no}{concurrent} ISOLATED LOADING{for_}"
       
      @@ -7393,13 +7432,13 @@ Default: True
      -
      1092    def lockingproperty_sql(self, expression: exp.LockingProperty) -> str:
      -1093        kind = expression.args.get("kind")
      -1094        this = f" {self.sql(expression, 'this')}" if expression.this else ""
      -1095        for_or_in = expression.args.get("for_or_in")
      -1096        lock_type = expression.args.get("lock_type")
      -1097        override = " OVERRIDE" if expression.args.get("override") else ""
      -1098        return f"LOCKING {kind}{this} {for_or_in} {lock_type}{override}"
      +            
      1076    def lockingproperty_sql(self, expression: exp.LockingProperty) -> str:
      +1077        kind = expression.args.get("kind")
      +1078        this = f" {self.sql(expression, 'this')}" if expression.this else ""
      +1079        for_or_in = expression.args.get("for_or_in")
      +1080        lock_type = expression.args.get("lock_type")
      +1081        override = " OVERRIDE" if expression.args.get("override") else ""
      +1082        return f"LOCKING {kind}{this} {for_or_in} {lock_type}{override}"
       
      @@ -7417,13 +7456,13 @@ Default: True
      -
      1100    def withdataproperty_sql(self, expression: exp.WithDataProperty) -> str:
      -1101        data_sql = f"WITH {'NO ' if expression.args.get('no') else ''}DATA"
      -1102        statistics = expression.args.get("statistics")
      -1103        statistics_sql = ""
      -1104        if statistics is not None:
      -1105            statistics_sql = f" AND {'NO ' if not statistics else ''}STATISTICS"
      -1106        return f"{data_sql}{statistics_sql}"
      +            
      1084    def withdataproperty_sql(self, expression: exp.WithDataProperty) -> str:
      +1085        data_sql = f"WITH {'NO ' if expression.args.get('no') else ''}DATA"
      +1086        statistics = expression.args.get("statistics")
      +1087        statistics_sql = ""
      +1088        if statistics is not None:
      +1089            statistics_sql = f" AND {'NO ' if not statistics else ''}STATISTICS"
      +1090        return f"{data_sql}{statistics_sql}"
       
      @@ -7441,28 +7480,28 @@ Default: True
      -
      1108    def insert_sql(self, expression: exp.Insert) -> str:
      -1109        overwrite = expression.args.get("overwrite")
      -1110
      -1111        if isinstance(expression.this, exp.Directory):
      -1112            this = "OVERWRITE " if overwrite else "INTO "
      -1113        else:
      -1114            this = "OVERWRITE TABLE " if overwrite else "INTO "
      -1115
      -1116        alternative = expression.args.get("alternative")
      -1117        alternative = f" OR {alternative} " if alternative else " "
      -1118        this = f"{this}{self.sql(expression, 'this')}"
      -1119
      -1120        exists = " IF EXISTS " if expression.args.get("exists") else " "
      -1121        partition_sql = (
      -1122            self.sql(expression, "partition") if expression.args.get("partition") else ""
      -1123        )
      -1124        expression_sql = self.sql(expression, "expression")
      -1125        conflict = self.sql(expression, "conflict")
      -1126        returning = self.sql(expression, "returning")
      -1127        sep = self.sep() if partition_sql else ""
      -1128        sql = f"INSERT{alternative}{this}{exists}{partition_sql}{sep}{expression_sql}{conflict}{returning}"
      -1129        return self.prepend_ctes(expression, sql)
      +            
      1092    def insert_sql(self, expression: exp.Insert) -> str:
      +1093        overwrite = expression.args.get("overwrite")
      +1094
      +1095        if isinstance(expression.this, exp.Directory):
      +1096            this = "OVERWRITE " if overwrite else "INTO "
      +1097        else:
      +1098            this = "OVERWRITE TABLE " if overwrite else "INTO "
      +1099
      +1100        alternative = expression.args.get("alternative")
      +1101        alternative = f" OR {alternative} " if alternative else " "
      +1102        this = f"{this}{self.sql(expression, 'this')}"
      +1103
      +1104        exists = " IF EXISTS " if expression.args.get("exists") else " "
      +1105        partition_sql = (
      +1106            self.sql(expression, "partition") if expression.args.get("partition") else ""
      +1107        )
      +1108        expression_sql = self.sql(expression, "expression")
      +1109        conflict = self.sql(expression, "conflict")
      +1110        returning = self.sql(expression, "returning")
      +1111        sep = self.sep() if partition_sql else ""
      +1112        sql = f"INSERT{alternative}{this}{exists}{partition_sql}{sep}{expression_sql}{conflict}{returning}"
      +1113        return self.prepend_ctes(expression, sql)
       
      @@ -7480,11 +7519,11 @@ Default: True
      -
      1131    def intersect_sql(self, expression: exp.Intersect) -> str:
      -1132        return self.prepend_ctes(
      -1133            expression,
      -1134            self.set_operation(expression, self.intersect_op(expression)),
      -1135        )
      +            
      1115    def intersect_sql(self, expression: exp.Intersect) -> str:
      +1116        return self.prepend_ctes(
      +1117            expression,
      +1118            self.set_operation(expression, self.intersect_op(expression)),
      +1119        )
       
      @@ -7502,8 +7541,8 @@ Default: True
      -
      1137    def intersect_op(self, expression: exp.Intersect) -> str:
      -1138        return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}"
      +            
      1121    def intersect_op(self, expression: exp.Intersect) -> str:
      +1122        return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}"
       
      @@ -7521,8 +7560,8 @@ Default: True
      -
      1140    def introducer_sql(self, expression: exp.Introducer) -> str:
      -1141        return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
      +            
      1124    def introducer_sql(self, expression: exp.Introducer) -> str:
      +1125        return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
       
      @@ -7540,8 +7579,8 @@ Default: True
      -
      1143    def pseudotype_sql(self, expression: exp.PseudoType) -> str:
      -1144        return expression.name.upper()
      +            
      1127    def pseudotype_sql(self, expression: exp.PseudoType) -> str:
      +1128        return expression.name.upper()
       
      @@ -7559,18 +7598,18 @@ Default: True
      -
      1146    def onconflict_sql(self, expression: exp.OnConflict) -> str:
      -1147        conflict = "ON DUPLICATE KEY" if expression.args.get("duplicate") else "ON CONFLICT"
      -1148        constraint = self.sql(expression, "constraint")
      -1149        if constraint:
      -1150            constraint = f"ON CONSTRAINT {constraint}"
      -1151        key = self.expressions(expression, key="key", flat=True)
      -1152        do = "" if expression.args.get("duplicate") else " DO "
      -1153        nothing = "NOTHING" if expression.args.get("nothing") else ""
      -1154        expressions = self.expressions(expression, flat=True)
      -1155        if expressions:
      -1156            expressions = f"UPDATE SET {expressions}"
      -1157        return f"{self.seg(conflict)} {constraint}{key}{do}{nothing}{expressions}"
      +            
      1130    def onconflict_sql(self, expression: exp.OnConflict) -> str:
      +1131        conflict = "ON DUPLICATE KEY" if expression.args.get("duplicate") else "ON CONFLICT"
      +1132        constraint = self.sql(expression, "constraint")
      +1133        if constraint:
      +1134            constraint = f"ON CONSTRAINT {constraint}"
      +1135        key = self.expressions(expression, key="key", flat=True)
      +1136        do = "" if expression.args.get("duplicate") else " DO "
      +1137        nothing = "NOTHING" if expression.args.get("nothing") else ""
      +1138        expressions = self.expressions(expression, flat=True)
      +1139        if expressions:
      +1140            expressions = f"UPDATE SET {expressions}"
      +1141        return f"{self.seg(conflict)} {constraint}{key}{do}{nothing}{expressions}"
       
      @@ -7588,8 +7627,8 @@ Default: True
      -
      1159    def returning_sql(self, expression: exp.Returning) -> str:
      -1160        return f"{self.seg('RETURNING')} {self.expressions(expression, flat=True)}"
      +            
      1143    def returning_sql(self, expression: exp.Returning) -> str:
      +1144        return f"{self.seg('RETURNING')} {self.expressions(expression, flat=True)}"
       
      @@ -7607,20 +7646,20 @@ Default: True
      -
      1162    def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str:
      -1163        fields = expression.args.get("fields")
      -1164        fields = f" FIELDS TERMINATED BY {fields}" if fields else ""
      -1165        escaped = expression.args.get("escaped")
      -1166        escaped = f" ESCAPED BY {escaped}" if escaped else ""
      -1167        items = expression.args.get("collection_items")
      -1168        items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else ""
      -1169        keys = expression.args.get("map_keys")
      -1170        keys = f" MAP KEYS TERMINATED BY {keys}" if keys else ""
      -1171        lines = expression.args.get("lines")
      -1172        lines = f" LINES TERMINATED BY {lines}" if lines else ""
      -1173        null = expression.args.get("null")
      -1174        null = f" NULL DEFINED AS {null}" if null else ""
      -1175        return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}"
      +            
      1146    def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str:
      +1147        fields = expression.args.get("fields")
      +1148        fields = f" FIELDS TERMINATED BY {fields}" if fields else ""
      +1149        escaped = expression.args.get("escaped")
      +1150        escaped = f" ESCAPED BY {escaped}" if escaped else ""
      +1151        items = expression.args.get("collection_items")
      +1152        items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else ""
      +1153        keys = expression.args.get("map_keys")
      +1154        keys = f" MAP KEYS TERMINATED BY {keys}" if keys else ""
      +1155        lines = expression.args.get("lines")
      +1156        lines = f" LINES TERMINATED BY {lines}" if lines else ""
      +1157        null = expression.args.get("null")
      +1158        null = f" NULL DEFINED AS {null}" if null else ""
      +1159        return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}"
       
      @@ -7638,29 +7677,29 @@ Default: True
      -
      1177    def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
      -1178        table = ".".join(
      -1179            part
      -1180            for part in [
      -1181                self.sql(expression, "catalog"),
      -1182                self.sql(expression, "db"),
      -1183                self.sql(expression, "this"),
      -1184            ]
      -1185            if part
      -1186        )
      -1187
      -1188        alias = self.sql(expression, "alias")
      -1189        alias = f"{sep}{alias}" if alias else ""
      -1190        hints = self.expressions(expression, key="hints", flat=True)
      -1191        hints = f" WITH ({hints})" if hints and self.TABLE_HINTS else ""
      -1192        pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
      -1193        pivots = f" {pivots}" if pivots else ""
      -1194        joins = self.expressions(expression, key="joins", sep="")
      -1195        laterals = self.expressions(expression, key="laterals", sep="")
      -1196        system_time = expression.args.get("system_time")
      -1197        system_time = f" {self.sql(expression, 'system_time')}" if system_time else ""
      -1198
      -1199        return f"{table}{system_time}{alias}{hints}{pivots}{joins}{laterals}"
      +            
      1161    def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
      +1162        table = ".".join(
      +1163            part
      +1164            for part in [
      +1165                self.sql(expression, "catalog"),
      +1166                self.sql(expression, "db"),
      +1167                self.sql(expression, "this"),
      +1168            ]
      +1169            if part
      +1170        )
      +1171
      +1172        alias = self.sql(expression, "alias")
      +1173        alias = f"{sep}{alias}" if alias else ""
      +1174        hints = self.expressions(expression, key="hints", flat=True)
      +1175        hints = f" WITH ({hints})" if hints and self.TABLE_HINTS else ""
      +1176        pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
      +1177        pivots = f" {pivots}" if pivots else ""
      +1178        joins = self.expressions(expression, key="joins", sep="")
      +1179        laterals = self.expressions(expression, key="laterals", sep="")
      +1180        system_time = expression.args.get("system_time")
      +1181        system_time = f" {self.sql(expression, 'system_time')}" if system_time else ""
      +1182
      +1183        return f"{table}{system_time}{alias}{hints}{pivots}{joins}{laterals}"
       
      @@ -7678,35 +7717,35 @@ Default: True
      -
      1201    def tablesample_sql(
      -1202        self, expression: exp.TableSample, seed_prefix: str = "SEED", sep=" AS "
      -1203    ) -> str:
      -1204        if self.alias_post_tablesample and expression.this.alias:
      -1205            table = expression.this.copy()
      -1206            table.set("alias", None)
      -1207            this = self.sql(table)
      -1208            alias = f"{sep}{self.sql(expression.this, 'alias')}"
      -1209        else:
      -1210            this = self.sql(expression, "this")
      -1211            alias = ""
      -1212        method = self.sql(expression, "method")
      -1213        method = f"{method.upper()} " if method and self.TABLESAMPLE_WITH_METHOD else ""
      -1214        numerator = self.sql(expression, "bucket_numerator")
      -1215        denominator = self.sql(expression, "bucket_denominator")
      -1216        field = self.sql(expression, "bucket_field")
      -1217        field = f" ON {field}" if field else ""
      -1218        bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else ""
      -1219        percent = self.sql(expression, "percent")
      -1220        percent = f"{percent} PERCENT" if percent else ""
      -1221        rows = self.sql(expression, "rows")
      -1222        rows = f"{rows} ROWS" if rows else ""
      -1223        size = self.sql(expression, "size")
      -1224        if size and self.TABLESAMPLE_SIZE_IS_PERCENT:
      -1225            size = f"{size} PERCENT"
      -1226        seed = self.sql(expression, "seed")
      -1227        seed = f" {seed_prefix} ({seed})" if seed else ""
      -1228        kind = expression.args.get("kind", "TABLESAMPLE")
      -1229        return f"{this} {kind} {method}({bucket}{percent}{rows}{size}){seed}{alias}"
      +            
      1185    def tablesample_sql(
      +1186        self, expression: exp.TableSample, seed_prefix: str = "SEED", sep=" AS "
      +1187    ) -> str:
      +1188        if self.ALIAS_POST_TABLESAMPLE and expression.this.alias:
      +1189            table = expression.this.copy()
      +1190            table.set("alias", None)
      +1191            this = self.sql(table)
      +1192            alias = f"{sep}{self.sql(expression.this, 'alias')}"
      +1193        else:
      +1194            this = self.sql(expression, "this")
      +1195            alias = ""
      +1196        method = self.sql(expression, "method")
      +1197        method = f"{method.upper()} " if method and self.TABLESAMPLE_WITH_METHOD else ""
      +1198        numerator = self.sql(expression, "bucket_numerator")
      +1199        denominator = self.sql(expression, "bucket_denominator")
      +1200        field = self.sql(expression, "bucket_field")
      +1201        field = f" ON {field}" if field else ""
      +1202        bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else ""
      +1203        percent = self.sql(expression, "percent")
      +1204        percent = f"{percent} PERCENT" if percent else ""
      +1205        rows = self.sql(expression, "rows")
      +1206        rows = f"{rows} ROWS" if rows else ""
      +1207        size = self.sql(expression, "size")
      +1208        if size and self.TABLESAMPLE_SIZE_IS_PERCENT:
      +1209            size = f"{size} PERCENT"
      +1210        seed = self.sql(expression, "seed")
      +1211        seed = f" {seed_prefix} ({seed})" if seed else ""
      +1212        kind = expression.args.get("kind", "TABLESAMPLE")
      +1213        return f"{this} {kind} {method}({bucket}{percent}{rows}{size}){seed}{alias}"
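
A hedged sketch of the renamed ALIAS_POST_TABLESAMPLE setting (outside the patch), assuming Hive keeps its alias-after-sample behavior; only the parse_one/sql calls are asserted here, the printed text may differ:

    import sqlglot

    ast = sqlglot.parse_one("SELECT * FROM t AS a TABLESAMPLE (10 ROWS)", read="snowflake")
    # Default generators keep the alias next to the table name ...
    print(ast.sql(dialect="snowflake"))
    # ... while a dialect with ALIAS_POST_TABLESAMPLE (e.g. Hive) moves it after
    # the sample clause, which is the branch taken at the top of tablesample_sql.
    print(ast.sql(dialect="hive"))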
       
      @@ -7724,23 +7763,23 @@ Default: True
      -
      1231    def pivot_sql(self, expression: exp.Pivot) -> str:
      -1232        expressions = self.expressions(expression, flat=True)
      -1233
      -1234        if expression.this:
      -1235            this = self.sql(expression, "this")
      -1236            on = f"{self.seg('ON')} {expressions}"
      -1237            using = self.expressions(expression, key="using", flat=True)
      -1238            using = f"{self.seg('USING')} {using}" if using else ""
      -1239            group = self.sql(expression, "group")
      -1240            return f"PIVOT {this}{on}{using}{group}"
      -1241
      -1242        alias = self.sql(expression, "alias")
      -1243        alias = f" AS {alias}" if alias else ""
      -1244        unpivot = expression.args.get("unpivot")
      -1245        direction = "UNPIVOT" if unpivot else "PIVOT"
      -1246        field = self.sql(expression, "field")
      -1247        return f"{direction}({expressions} FOR {field}){alias}"
      +            
      1215    def pivot_sql(self, expression: exp.Pivot) -> str:
      +1216        expressions = self.expressions(expression, flat=True)
      +1217
      +1218        if expression.this:
      +1219            this = self.sql(expression, "this")
      +1220            on = f"{self.seg('ON')} {expressions}"
      +1221            using = self.expressions(expression, key="using", flat=True)
      +1222            using = f"{self.seg('USING')} {using}" if using else ""
      +1223            group = self.sql(expression, "group")
      +1224            return f"PIVOT {this}{on}{using}{group}"
      +1225
      +1226        alias = self.sql(expression, "alias")
      +1227        alias = f" AS {alias}" if alias else ""
      +1228        unpivot = expression.args.get("unpivot")
      +1229        direction = "UNPIVOT" if unpivot else "PIVOT"
      +1230        field = self.sql(expression, "field")
      +1231        return f"{direction}({expressions} FOR {field}){alias}"
       
      @@ -7758,8 +7797,8 @@ Default: True
      -
      1249    def tuple_sql(self, expression: exp.Tuple) -> str:
      -1250        return f"({self.expressions(expression, flat=True)})"
      +            
      1233    def tuple_sql(self, expression: exp.Tuple) -> str:
      +1234        return f"({self.expressions(expression, flat=True)})"
       
      @@ -7777,14 +7816,14 @@ Default: True
      -
      1252    def update_sql(self, expression: exp.Update) -> str:
      -1253        this = self.sql(expression, "this")
      -1254        set_sql = self.expressions(expression, flat=True)
      -1255        from_sql = self.sql(expression, "from")
      -1256        where_sql = self.sql(expression, "where")
      -1257        returning = self.sql(expression, "returning")
      -1258        sql = f"UPDATE {this} SET {set_sql}{from_sql}{where_sql}{returning}"
      -1259        return self.prepend_ctes(expression, sql)
      +            
      1236    def update_sql(self, expression: exp.Update) -> str:
      +1237        this = self.sql(expression, "this")
      +1238        set_sql = self.expressions(expression, flat=True)
      +1239        from_sql = self.sql(expression, "from")
      +1240        where_sql = self.sql(expression, "where")
      +1241        returning = self.sql(expression, "returning")
      +1242        sql = f"UPDATE {this} SET {set_sql}{from_sql}{where_sql}{returning}"
      +1243        return self.prepend_ctes(expression, sql)
       
      @@ -7802,16 +7841,16 @@ Default: True
      -
      1261    def values_sql(self, expression: exp.Values) -> str:
      -1262        args = self.expressions(expression)
      -1263        alias = self.sql(expression, "alias")
      -1264        values = f"VALUES{self.seg('')}{args}"
      -1265        values = (
      -1266            f"({values})"
      -1267            if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From))
      -1268            else values
      -1269        )
      -1270        return f"{values} AS {alias}" if alias else values
      +            
      1245    def values_sql(self, expression: exp.Values) -> str:
      +1246        args = self.expressions(expression)
      +1247        alias = self.sql(expression, "alias")
      +1248        values = f"VALUES{self.seg('')}{args}"
      +1249        values = (
      +1250            f"({values})"
      +1251            if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From))
      +1252            else values
      +1253        )
      +1254        return f"{values} AS {alias}" if alias else values
       
      @@ -7829,8 +7868,8 @@ Default: True
      -
      1272    def var_sql(self, expression: exp.Var) -> str:
      -1273        return self.sql(expression, "this")
      +            
      1256    def var_sql(self, expression: exp.Var) -> str:
      +1257        return self.sql(expression, "this")
       
      @@ -7848,10 +7887,10 @@ Default: True
      -
      1275    def into_sql(self, expression: exp.Into) -> str:
      -1276        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
      -1277        unlogged = " UNLOGGED" if expression.args.get("unlogged") else ""
      -1278        return f"{self.seg('INTO')}{temporary or unlogged} {self.sql(expression, 'this')}"
      +            
      1259    def into_sql(self, expression: exp.Into) -> str:
      +1260        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
      +1261        unlogged = " UNLOGGED" if expression.args.get("unlogged") else ""
      +1262        return f"{self.seg('INTO')}{temporary or unlogged} {self.sql(expression, 'this')}"
       
      @@ -7869,8 +7908,8 @@ Default: True
      -
      1280    def from_sql(self, expression: exp.From) -> str:
      -1281        return f"{self.seg('FROM')} {self.sql(expression, 'this')}"
      +            
      1264    def from_sql(self, expression: exp.From) -> str:
      +1265        return f"{self.seg('FROM')} {self.sql(expression, 'this')}"
       
      @@ -7888,39 +7927,39 @@ Default: True
      -
      1283    def group_sql(self, expression: exp.Group) -> str:
      -1284        group_by = self.op_expressions("GROUP BY", expression)
      -1285        grouping_sets = self.expressions(expression, key="grouping_sets", indent=False)
      -1286        grouping_sets = (
      -1287            f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else ""
      -1288        )
      -1289
      -1290        cube = expression.args.get("cube", [])
      -1291        if seq_get(cube, 0) is True:
      -1292            return f"{group_by}{self.seg('WITH CUBE')}"
      -1293        else:
      -1294            cube_sql = self.expressions(expression, key="cube", indent=False)
      -1295            cube_sql = f"{self.seg('CUBE')} {self.wrap(cube_sql)}" if cube_sql else ""
      -1296
      -1297        rollup = expression.args.get("rollup", [])
      -1298        if seq_get(rollup, 0) is True:
      -1299            return f"{group_by}{self.seg('WITH ROLLUP')}"
      -1300        else:
      -1301            rollup_sql = self.expressions(expression, key="rollup", indent=False)
      -1302            rollup_sql = f"{self.seg('ROLLUP')} {self.wrap(rollup_sql)}" if rollup_sql else ""
      -1303
      -1304        groupings = csv(
      -1305            grouping_sets,
      -1306            cube_sql,
      -1307            rollup_sql,
      -1308            self.seg("WITH TOTALS") if expression.args.get("totals") else "",
      -1309            sep=self.GROUPINGS_SEP,
      -1310        )
      -1311
      -1312        if expression.args.get("expressions") and groupings:
      -1313            group_by = f"{group_by}{self.GROUPINGS_SEP}"
      -1314
      -1315        return f"{group_by}{groupings}"
      +            
      1267    def group_sql(self, expression: exp.Group) -> str:
      +1268        group_by = self.op_expressions("GROUP BY", expression)
      +1269        grouping_sets = self.expressions(expression, key="grouping_sets", indent=False)
      +1270        grouping_sets = (
      +1271            f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else ""
      +1272        )
      +1273
      +1274        cube = expression.args.get("cube", [])
      +1275        if seq_get(cube, 0) is True:
      +1276            return f"{group_by}{self.seg('WITH CUBE')}"
      +1277        else:
      +1278            cube_sql = self.expressions(expression, key="cube", indent=False)
      +1279            cube_sql = f"{self.seg('CUBE')} {self.wrap(cube_sql)}" if cube_sql else ""
      +1280
      +1281        rollup = expression.args.get("rollup", [])
      +1282        if seq_get(rollup, 0) is True:
      +1283            return f"{group_by}{self.seg('WITH ROLLUP')}"
      +1284        else:
      +1285            rollup_sql = self.expressions(expression, key="rollup", indent=False)
      +1286            rollup_sql = f"{self.seg('ROLLUP')} {self.wrap(rollup_sql)}" if rollup_sql else ""
      +1287
      +1288        groupings = csv(
      +1289            grouping_sets,
      +1290            cube_sql,
      +1291            rollup_sql,
      +1292            self.seg("WITH TOTALS") if expression.args.get("totals") else "",
      +1293            sep=self.GROUPINGS_SEP,
      +1294        )
      +1295
      +1296        if expression.args.get("expressions") and groupings:
      +1297            group_by = f"{group_by}{self.GROUPINGS_SEP}"
      +1298
      +1299        return f"{group_by}{groupings}"
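
For illustration of the branches in group_sql (not part of the patch): the bare WITH ROLLUP form short-circuits via seq_get(rollup, 0) being True, while ROLLUP (...) and GROUPING SETS (...) are rendered as grouping lists:

    import sqlglot

    print(sqlglot.transpile("SELECT a, SUM(b) FROM t GROUP BY a WITH ROLLUP", read="mysql")[0])
    print(sqlglot.transpile("SELECT a, c, SUM(b) FROM t GROUP BY GROUPING SETS ((a), (c))")[0])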
       
      @@ -7938,9 +7977,9 @@ Default: True
      -
      1317    def having_sql(self, expression: exp.Having) -> str:
      -1318        this = self.indent(self.sql(expression, "this"))
      -1319        return f"{self.seg('HAVING')}{self.sep()}{this}"
      +            
      1301    def having_sql(self, expression: exp.Having) -> str:
      +1302        this = self.indent(self.sql(expression, "this"))
      +1303        return f"{self.seg('HAVING')}{self.sep()}{this}"
       
      @@ -7958,38 +7997,38 @@ Default: True
      -
      1321    def join_sql(self, expression: exp.Join) -> str:
      -1322        op_sql = " ".join(
      -1323            op
      -1324            for op in (
      -1325                expression.method,
      -1326                "GLOBAL" if expression.args.get("global") else None,
      -1327                expression.side,
      -1328                expression.kind,
      -1329                expression.hint if self.JOIN_HINTS else None,
      -1330            )
      -1331            if op
      -1332        )
      -1333        on_sql = self.sql(expression, "on")
      -1334        using = expression.args.get("using")
      -1335
      -1336        if not on_sql and using:
      -1337            on_sql = csv(*(self.sql(column) for column in using))
      -1338
      -1339        this_sql = self.sql(expression, "this")
      -1340
      -1341        if on_sql:
      -1342            on_sql = self.indent(on_sql, skip_first=True)
      -1343            space = self.seg(" " * self.pad) if self.pretty else " "
      -1344            if using:
      -1345                on_sql = f"{space}USING ({on_sql})"
      -1346            else:
      -1347                on_sql = f"{space}ON {on_sql}"
      -1348        elif not op_sql:
      -1349            return f", {this_sql}"
      -1350
      -1351        op_sql = f"{op_sql} JOIN" if op_sql else "JOIN"
      -1352        return f"{self.seg(op_sql)} {this_sql}{on_sql}"
      +            
      1305    def join_sql(self, expression: exp.Join) -> str:
      +1306        op_sql = " ".join(
      +1307            op
      +1308            for op in (
      +1309                expression.method,
      +1310                "GLOBAL" if expression.args.get("global") else None,
      +1311                expression.side,
      +1312                expression.kind,
      +1313                expression.hint if self.JOIN_HINTS else None,
      +1314            )
      +1315            if op
      +1316        )
      +1317        on_sql = self.sql(expression, "on")
      +1318        using = expression.args.get("using")
      +1319
      +1320        if not on_sql and using:
      +1321            on_sql = csv(*(self.sql(column) for column in using))
      +1322
      +1323        this_sql = self.sql(expression, "this")
      +1324
      +1325        if on_sql:
      +1326            on_sql = self.indent(on_sql, skip_first=True)
      +1327            space = self.seg(" " * self.pad) if self.pretty else " "
      +1328            if using:
      +1329                on_sql = f"{space}USING ({on_sql})"
      +1330            else:
      +1331                on_sql = f"{space}ON {on_sql}"
      +1332        elif not op_sql:
      +1333            return f", {this_sql}"
      +1334
      +1335        op_sql = f"{op_sql} JOIN" if op_sql else "JOIN"
      +1336        return f"{self.seg(op_sql)} {this_sql}{on_sql}"
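
The two join paths above, USING versus ON, can be seen through the public transpile API (illustrative only):

    import sqlglot

    # The USING column list is rendered into the on_sql slot and then wrapped
    # back into USING (...), so the clause survives a round trip.
    print(sqlglot.transpile("SELECT * FROM a JOIN b USING (id, dt)")[0])
    print(sqlglot.transpile("SELECT * FROM a LEFT OUTER JOIN b ON a.id = b.id")[0])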
       
      @@ -8007,10 +8046,10 @@ Default: True
      -
      1354    def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str:
      -1355        args = self.expressions(expression, flat=True)
      -1356        args = f"({args})" if len(args.split(",")) > 1 else args
      -1357        return f"{args} {arrow_sep} {self.sql(expression, 'this')}"
      +            
      1338    def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str:
      +1339        args = self.expressions(expression, flat=True)
      +1340        args = f"({args})" if len(args.split(",")) > 1 else args
      +1341        return f"{args} {arrow_sep} {self.sql(expression, 'this')}"
       
      @@ -8028,23 +8067,23 @@ Default: True
      -
      1359    def lateral_sql(self, expression: exp.Lateral) -> str:
      -1360        this = self.sql(expression, "this")
      -1361
      -1362        if isinstance(expression.this, exp.Subquery):
      -1363            return f"LATERAL {this}"
      -1364
      -1365        if expression.args.get("view"):
      -1366            alias = expression.args["alias"]
      -1367            columns = self.expressions(alias, key="columns", flat=True)
      -1368            table = f" {alias.name}" if alias.name else ""
      -1369            columns = f" AS {columns}" if columns else ""
      -1370            op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}")
      -1371            return f"{op_sql}{self.sep()}{this}{table}{columns}"
      -1372
      -1373        alias = self.sql(expression, "alias")
      -1374        alias = f" AS {alias}" if alias else ""
      -1375        return f"LATERAL {this}{alias}"
      +            
      1343    def lateral_sql(self, expression: exp.Lateral) -> str:
      +1344        this = self.sql(expression, "this")
      +1345
      +1346        if isinstance(expression.this, exp.Subquery):
      +1347            return f"LATERAL {this}"
      +1348
      +1349        if expression.args.get("view"):
      +1350            alias = expression.args["alias"]
      +1351            columns = self.expressions(alias, key="columns", flat=True)
      +1352            table = f" {alias.name}" if alias.name else ""
      +1353            columns = f" AS {columns}" if columns else ""
      +1354            op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}")
      +1355            return f"{op_sql}{self.sep()}{this}{table}{columns}"
      +1356
      +1357        alias = self.sql(expression, "alias")
      +1358        alias = f" AS {alias}" if alias else ""
      +1359        return f"LATERAL {this}{alias}"
       
      @@ -8062,9 +8101,17 @@ Default: True
      -
      1377    def limit_sql(self, expression: exp.Limit) -> str:
      -1378        this = self.sql(expression, "this")
      -1379        return f"{this}{self.seg('LIMIT')} {self.sql(expression, 'expression')}"
      +            
      1361    def limit_sql(self, expression: exp.Limit) -> str:
      +1362        this = self.sql(expression, "this")
      +1363        args = ", ".join(
      +1364            sql
      +1365            for sql in (
      +1366                self.sql(expression, "offset"),
      +1367                self.sql(expression, "expression"),
      +1368            )
      +1369            if sql
      +1370        )
      +1371        return f"{this}{self.seg('LIMIT')} {args}"
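
The rewritten limit_sql joins an optional "offset" arg with the limit expression, which is what lets MySQL's two-argument LIMIT round-trip. A sketch, assuming the MySQL parser at this version populates that new arg:

    import sqlglot

    ast = sqlglot.parse_one("SELECT x FROM t LIMIT 5, 10", read="mysql")
    print(ast.args["limit"].args.keys())   # inspect what the parser stored on exp.Limit
    print(ast.sql(dialect="mysql"))        # when the offset arg is present, limit_sql joins it with the count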
       
      @@ -8082,9 +8129,9 @@ Default: True
      -
      1381    def offset_sql(self, expression: exp.Offset) -> str:
      -1382        this = self.sql(expression, "this")
      -1383        return f"{this}{self.seg('OFFSET')} {self.sql(expression, 'expression')}"
      +            
      1373    def offset_sql(self, expression: exp.Offset) -> str:
      +1374        this = self.sql(expression, "this")
      +1375        return f"{this}{self.seg('OFFSET')} {self.sql(expression, 'expression')}"
       
      @@ -8102,15 +8149,15 @@ Default: True
      -
      1385    def setitem_sql(self, expression: exp.SetItem) -> str:
      -1386        kind = self.sql(expression, "kind")
      -1387        kind = f"{kind} " if kind else ""
      -1388        this = self.sql(expression, "this")
      -1389        expressions = self.expressions(expression)
      -1390        collate = self.sql(expression, "collate")
      -1391        collate = f" COLLATE {collate}" if collate else ""
      -1392        global_ = "GLOBAL " if expression.args.get("global") else ""
      -1393        return f"{global_}{kind}{this}{expressions}{collate}"
      +            
      1377    def setitem_sql(self, expression: exp.SetItem) -> str:
      +1378        kind = self.sql(expression, "kind")
      +1379        kind = f"{kind} " if kind else ""
      +1380        this = self.sql(expression, "this")
      +1381        expressions = self.expressions(expression)
      +1382        collate = self.sql(expression, "collate")
      +1383        collate = f" COLLATE {collate}" if collate else ""
      +1384        global_ = "GLOBAL " if expression.args.get("global") else ""
      +1385        return f"{global_}{kind}{this}{expressions}{collate}"
       
      @@ -8128,11 +8175,11 @@ Default: True
      -
      1395    def set_sql(self, expression: exp.Set) -> str:
      -1396        expressions = (
      -1397            f" {self.expressions(expression, flat=True)}" if expression.expressions else ""
      -1398        )
      -1399        return f"SET{expressions}"
      +            
      1387    def set_sql(self, expression: exp.Set) -> str:
      +1388        expressions = (
      +1389            f" {self.expressions(expression, flat=True)}" if expression.expressions else ""
      +1390        )
      +1391        return f"SET{expressions}"
       
      @@ -8150,8 +8197,8 @@ Default: True
      -
      1401    def pragma_sql(self, expression: exp.Pragma) -> str:
      -1402        return f"PRAGMA {self.sql(expression, 'this')}"
      +            
      1393    def pragma_sql(self, expression: exp.Pragma) -> str:
      +1394        return f"PRAGMA {self.sql(expression, 'this')}"
       
      @@ -8169,23 +8216,23 @@ Default: True
      -
      1404    def lock_sql(self, expression: exp.Lock) -> str:
      -1405        if not self.LOCKING_READS_SUPPORTED:
      -1406            self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported")
      -1407            return ""
      -1408
      -1409        lock_type = "FOR UPDATE" if expression.args["update"] else "FOR SHARE"
      -1410        expressions = self.expressions(expression, flat=True)
      -1411        expressions = f" OF {expressions}" if expressions else ""
      -1412        wait = expression.args.get("wait")
      -1413
      -1414        if wait is not None:
      -1415            if isinstance(wait, exp.Literal):
      -1416                wait = f" WAIT {self.sql(wait)}"
      -1417            else:
      -1418                wait = " NOWAIT" if wait else " SKIP LOCKED"
      -1419
      -1420        return f"{lock_type}{expressions}{wait or ''}"
      +            
      1396    def lock_sql(self, expression: exp.Lock) -> str:
      +1397        if not self.LOCKING_READS_SUPPORTED:
      +1398            self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported")
      +1399            return ""
      +1400
      +1401        lock_type = "FOR UPDATE" if expression.args["update"] else "FOR SHARE"
      +1402        expressions = self.expressions(expression, flat=True)
      +1403        expressions = f" OF {expressions}" if expressions else ""
      +1404        wait = expression.args.get("wait")
      +1405
      +1406        if wait is not None:
      +1407            if isinstance(wait, exp.Literal):
      +1408                wait = f" WAIT {self.sql(wait)}"
      +1409            else:
      +1410                wait = " NOWAIT" if wait else " SKIP LOCKED"
      +1411
      +1412        return f"{lock_type}{expressions}{wait or ''}"
       
      @@ -8203,14 +8250,14 @@ Default: True
      -
      1422    def literal_sql(self, expression: exp.Literal) -> str:
      -1423        text = expression.this or ""
      -1424        if expression.is_string:
      -1425            text = text.replace(self.quote_end, self._escaped_quote_end)
      -1426            if self.pretty:
      -1427                text = text.replace("\n", self.SENTINEL_LINE_BREAK)
      -1428            text = f"{self.quote_start}{text}{self.quote_end}"
      -1429        return text
      +            
      1414    def literal_sql(self, expression: exp.Literal) -> str:
      +1415        text = expression.this or ""
      +1416        if expression.is_string:
      +1417            text = text.replace(self.QUOTE_END, self._escaped_quote_end)
      +1418            if self.pretty:
      +1419                text = text.replace("\n", self.SENTINEL_LINE_BREAK)
      +1420            text = f"{self.QUOTE_START}{text}{self.QUOTE_END}"
      +1421        return text
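
As a side note on literal_sql's re-quoting: embedded quote characters are escaped with _escaped_quote_end and the text is wrapped in QUOTE_START/QUOTE_END, so transpiling can change the escape style. Illustrative sketch; only the transpile calls are asserted, the printed text may differ:

    import sqlglot

    print(sqlglot.transpile("SELECT 'O''Brien'")[0])
    print(sqlglot.transpile("SELECT 'O''Brien'", read="postgres", write="spark")[0])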
       
      @@ -8228,18 +8275,18 @@ Default: True
      -
      1431    def loaddata_sql(self, expression: exp.LoadData) -> str:
      -1432        local = " LOCAL" if expression.args.get("local") else ""
      -1433        inpath = f" INPATH {self.sql(expression, 'inpath')}"
      -1434        overwrite = " OVERWRITE" if expression.args.get("overwrite") else ""
      -1435        this = f" INTO TABLE {self.sql(expression, 'this')}"
      -1436        partition = self.sql(expression, "partition")
      -1437        partition = f" {partition}" if partition else ""
      -1438        input_format = self.sql(expression, "input_format")
      -1439        input_format = f" INPUTFORMAT {input_format}" if input_format else ""
      -1440        serde = self.sql(expression, "serde")
      -1441        serde = f" SERDE {serde}" if serde else ""
      -1442        return f"LOAD DATA{local}{inpath}{overwrite}{this}{partition}{input_format}{serde}"
      +            
      1423    def loaddata_sql(self, expression: exp.LoadData) -> str:
      +1424        local = " LOCAL" if expression.args.get("local") else ""
      +1425        inpath = f" INPATH {self.sql(expression, 'inpath')}"
      +1426        overwrite = " OVERWRITE" if expression.args.get("overwrite") else ""
      +1427        this = f" INTO TABLE {self.sql(expression, 'this')}"
      +1428        partition = self.sql(expression, "partition")
      +1429        partition = f" {partition}" if partition else ""
      +1430        input_format = self.sql(expression, "input_format")
      +1431        input_format = f" INPUTFORMAT {input_format}" if input_format else ""
      +1432        serde = self.sql(expression, "serde")
      +1433        serde = f" SERDE {serde}" if serde else ""
      +1434        return f"LOAD DATA{local}{inpath}{overwrite}{this}{partition}{input_format}{serde}"
       
      @@ -8257,8 +8304,8 @@ Default: True
      -
      1444    def null_sql(self, *_) -> str:
      -1445        return "NULL"
      +            
      1436    def null_sql(self, *_) -> str:
      +1437        return "NULL"
       
      @@ -8276,8 +8323,8 @@ Default: True
      -
      1447    def boolean_sql(self, expression: exp.Boolean) -> str:
      -1448        return "TRUE" if expression.this else "FALSE"
      +            
      1439    def boolean_sql(self, expression: exp.Boolean) -> str:
      +1440        return "TRUE" if expression.this else "FALSE"
       
      @@ -8295,10 +8342,10 @@ Default: True
      -
      1450    def order_sql(self, expression: exp.Order, flat: bool = False) -> str:
      -1451        this = self.sql(expression, "this")
      -1452        this = f"{this} " if this else this
      -1453        return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat)  # type: ignore
      +            
      1442    def order_sql(self, expression: exp.Order, flat: bool = False) -> str:
      +1443        this = self.sql(expression, "this")
      +1444        this = f"{this} " if this else this
      +1445        return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat)  # type: ignore
       
      @@ -8316,8 +8363,8 @@ Default: True
      -
      1455    def cluster_sql(self, expression: exp.Cluster) -> str:
      -1456        return self.op_expressions("CLUSTER BY", expression)
      +            
      1447    def cluster_sql(self, expression: exp.Cluster) -> str:
      +1448        return self.op_expressions("CLUSTER BY", expression)
       
      @@ -8335,8 +8382,8 @@ Default: True
      -
      1458    def distribute_sql(self, expression: exp.Distribute) -> str:
      -1459        return self.op_expressions("DISTRIBUTE BY", expression)
      +            
      1450    def distribute_sql(self, expression: exp.Distribute) -> str:
      +1451        return self.op_expressions("DISTRIBUTE BY", expression)
       
      @@ -8354,8 +8401,8 @@ Default: True
      -
      1461    def sort_sql(self, expression: exp.Sort) -> str:
      -1462        return self.op_expressions("SORT BY", expression)
      +            
      1453    def sort_sql(self, expression: exp.Sort) -> str:
      +1454        return self.op_expressions("SORT BY", expression)
       
      @@ -8373,36 +8420,36 @@ Default: True
      -
      1464    def ordered_sql(self, expression: exp.Ordered) -> str:
      -1465        desc = expression.args.get("desc")
      -1466        asc = not desc
      -1467
      -1468        nulls_first = expression.args.get("nulls_first")
      -1469        nulls_last = not nulls_first
      -1470        nulls_are_large = self.null_ordering == "nulls_are_large"
      -1471        nulls_are_small = self.null_ordering == "nulls_are_small"
      -1472        nulls_are_last = self.null_ordering == "nulls_are_last"
      -1473
      -1474        sort_order = " DESC" if desc else ""
      -1475        nulls_sort_change = ""
      -1476        if nulls_first and (
      -1477            (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last
      -1478        ):
      -1479            nulls_sort_change = " NULLS FIRST"
      -1480        elif (
      -1481            nulls_last
      -1482            and ((asc and nulls_are_small) or (desc and nulls_are_large))
      -1483            and not nulls_are_last
      -1484        ):
      -1485            nulls_sort_change = " NULLS LAST"
      -1486
      -1487        if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED:
      -1488            self.unsupported(
      -1489                "Sorting in an ORDER BY on NULLS FIRST/NULLS LAST is not supported by this dialect"
      -1490            )
      -1491            nulls_sort_change = ""
      -1492
      -1493        return f"{self.sql(expression, 'this')}{sort_order}{nulls_sort_change}"
      +            
      1456    def ordered_sql(self, expression: exp.Ordered) -> str:
      +1457        desc = expression.args.get("desc")
      +1458        asc = not desc
      +1459
      +1460        nulls_first = expression.args.get("nulls_first")
      +1461        nulls_last = not nulls_first
      +1462        nulls_are_large = self.NULL_ORDERING == "nulls_are_large"
      +1463        nulls_are_small = self.NULL_ORDERING == "nulls_are_small"
      +1464        nulls_are_last = self.NULL_ORDERING == "nulls_are_last"
      +1465
      +1466        sort_order = " DESC" if desc else ""
      +1467        nulls_sort_change = ""
      +1468        if nulls_first and (
      +1469            (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last
      +1470        ):
      +1471            nulls_sort_change = " NULLS FIRST"
      +1472        elif (
      +1473            nulls_last
      +1474            and ((asc and nulls_are_small) or (desc and nulls_are_large))
      +1475            and not nulls_are_last
      +1476        ):
      +1477            nulls_sort_change = " NULLS LAST"
      +1478
      +1479        if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED:
      +1480            self.unsupported(
      +1481                "Sorting in an ORDER BY on NULLS FIRST/NULLS LAST is not supported by this dialect"
      +1482            )
      +1483            nulls_sort_change = ""
      +1484
      +1485        return f"{self.sql(expression, 'this')}{sort_order}{nulls_sort_change}"
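
A brief sketch of ordered_sql's pruning (outside the patch), assuming the default dialect keeps the "nulls_are_small" NULL_ORDERING: ascending order then already puts NULLs first, so an explicit NULLS FIRST can be dropped while NULLS LAST must be kept:

    import sqlglot

    print(sqlglot.transpile("SELECT a FROM t ORDER BY a NULLS FIRST")[0])
    print(sqlglot.transpile("SELECT a FROM t ORDER BY a NULLS LAST")[0])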
       
      @@ -8420,37 +8467,37 @@ Default: True
      -
      1495    def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
      -1496        partition = self.partition_by_sql(expression)
      -1497        order = self.sql(expression, "order")
      -1498        measures = self.expressions(expression, key="measures")
      -1499        measures = self.seg(f"MEASURES{self.seg(measures)}") if measures else ""
      -1500        rows = self.sql(expression, "rows")
      -1501        rows = self.seg(rows) if rows else ""
      -1502        after = self.sql(expression, "after")
      -1503        after = self.seg(after) if after else ""
      -1504        pattern = self.sql(expression, "pattern")
      -1505        pattern = self.seg(f"PATTERN ({pattern})") if pattern else ""
      -1506        definition_sqls = [
      -1507            f"{self.sql(definition, 'alias')} AS {self.sql(definition, 'this')}"
      -1508            for definition in expression.args.get("define", [])
      -1509        ]
      -1510        definitions = self.expressions(sqls=definition_sqls)
      -1511        define = self.seg(f"DEFINE{self.seg(definitions)}") if definitions else ""
      -1512        body = "".join(
      -1513            (
      -1514                partition,
      -1515                order,
      -1516                measures,
      -1517                rows,
      -1518                after,
      -1519                pattern,
      -1520                define,
      -1521            )
      -1522        )
      -1523        alias = self.sql(expression, "alias")
      -1524        alias = f" {alias}" if alias else ""
      -1525        return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}{alias}"
      +            
      1487    def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
      +1488        partition = self.partition_by_sql(expression)
      +1489        order = self.sql(expression, "order")
      +1490        measures = self.expressions(expression, key="measures")
      +1491        measures = self.seg(f"MEASURES{self.seg(measures)}") if measures else ""
      +1492        rows = self.sql(expression, "rows")
      +1493        rows = self.seg(rows) if rows else ""
      +1494        after = self.sql(expression, "after")
      +1495        after = self.seg(after) if after else ""
      +1496        pattern = self.sql(expression, "pattern")
      +1497        pattern = self.seg(f"PATTERN ({pattern})") if pattern else ""
      +1498        definition_sqls = [
      +1499            f"{self.sql(definition, 'alias')} AS {self.sql(definition, 'this')}"
      +1500            for definition in expression.args.get("define", [])
      +1501        ]
      +1502        definitions = self.expressions(sqls=definition_sqls)
      +1503        define = self.seg(f"DEFINE{self.seg(definitions)}") if definitions else ""
      +1504        body = "".join(
      +1505            (
      +1506                partition,
      +1507                order,
      +1508                measures,
      +1509                rows,
      +1510                after,
      +1511                pattern,
      +1512                define,
      +1513            )
      +1514        )
      +1515        alias = self.sql(expression, "alias")
      +1516        alias = f" {alias}" if alias else ""
      +1517        return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}{alias}"
       
      @@ -8468,31 +8515,54 @@ Default: True
      -
      1527    def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
      -1528        limit = expression.args.get("limit")
      -1529
      -1530        if self.LIMIT_FETCH == "LIMIT" and isinstance(limit, exp.Fetch):
      -1531            limit = exp.Limit(expression=limit.args.get("count"))
      -1532        elif self.LIMIT_FETCH == "FETCH" and isinstance(limit, exp.Limit):
      -1533            limit = exp.Fetch(direction="FIRST", count=limit.expression)
      -1534
      -1535        fetch = isinstance(limit, exp.Fetch)
      -1536
      -1537        return csv(
      -1538            *sqls,
      -1539            *[self.sql(join) for join in expression.args.get("joins") or []],
      -1540            self.sql(expression, "match"),
      -1541            *[self.sql(lateral) for lateral in expression.args.get("laterals") or []],
      -1542            self.sql(expression, "where"),
      -1543            self.sql(expression, "group"),
      -1544            self.sql(expression, "having"),
      -1545            *self.after_having_modifiers(expression),
      -1546            self.sql(expression, "order"),
      -1547            self.sql(expression, "offset") if fetch else self.sql(limit),
      -1548            self.sql(limit) if fetch else self.sql(expression, "offset"),
      -1549            *self.after_limit_modifiers(expression),
      -1550            sep="",
      -1551        )
      +            
      1519    def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
      +1520        limit: t.Optional[exp.Fetch | exp.Limit] = expression.args.get("limit")
      +1521
      +1522        if self.LIMIT_FETCH == "LIMIT" and isinstance(limit, exp.Fetch):
      +1523            limit = exp.Limit(expression=limit.args.get("count"))
      +1524        elif self.LIMIT_FETCH == "FETCH" and isinstance(limit, exp.Limit):
      +1525            limit = exp.Fetch(direction="FIRST", count=limit.expression)
      +1526
      +1527        fetch = isinstance(limit, exp.Fetch)
      +1528
      +1529        return csv(
      +1530            *sqls,
      +1531            *[self.sql(join) for join in expression.args.get("joins") or []],
      +1532            self.sql(expression, "match"),
      +1533            *[self.sql(lateral) for lateral in expression.args.get("laterals") or []],
      +1534            self.sql(expression, "where"),
      +1535            self.sql(expression, "group"),
      +1536            self.sql(expression, "having"),
      +1537            *self.after_having_modifiers(expression),
      +1538            self.sql(expression, "order"),
      +1539            *self.offset_limit_modifiers(expression, fetch, limit),
      +1540            *self.after_limit_modifiers(expression),
      +1541            sep="",
      +1542        )
      +
+            def offset_limit_modifiers(self, expression: sqlglot.expressions.Expression, fetch: bool, limit: Union[sqlglot.expressions.Fetch, sqlglot.expressions.Limit, NoneType]) -> List[str]:
      1544    def offset_limit_modifiers(
      +1545        self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
      +1546    ) -> t.List[str]:
      +1547        return [
      +1548            self.sql(expression, "offset") if fetch else self.sql(limit),
      +1549            self.sql(limit) if fetch else self.sql(expression, "offset"),
      +1550        ]
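
The new offset_limit_modifiers hook isolates the LIMIT/OFFSET ordering that query_modifiers used to inline. An illustrative sketch of calling it directly on a bare Generator (the fragments carry the leading separator added by seg):

    from sqlglot import parse_one
    from sqlglot.generator import Generator

    ast = parse_one("SELECT x FROM t LIMIT 10 OFFSET 5")
    gen = Generator()

    # With a plain LIMIT (fetch=False) the hook emits the LIMIT fragment first
    # and the OFFSET fragment second; a FETCH-based limit flips that order.
    print(gen.offset_limit_modifiers(ast, fetch=False, limit=ast.args.get("limit")))
    print(gen.generate(ast))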
       
      @@ -8510,13 +8580,13 @@ Default: True
      -
      1553    def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
      -1554        return [
      -1555            self.sql(expression, "qualify"),
      -1556            self.seg("WINDOW ") + self.expressions(expression, key="windows", flat=True)
      -1557            if expression.args.get("windows")
      -1558            else "",
      -1559        ]
      +            
      1552    def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
      +1553        return [
      +1554            self.sql(expression, "qualify"),
      +1555            self.seg("WINDOW ") + self.expressions(expression, key="windows", flat=True)
      +1556            if expression.args.get("windows")
      +1557            else "",
      +1558        ]
       
      @@ -8534,10 +8604,10 @@ Default: True
      -
      1561    def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
      -1562        locks = self.expressions(expression, key="locks", sep=" ")
      -1563        locks = f" {locks}" if locks else ""
      -1564        return [locks, self.sql(expression, "sample")]
      +            
      1560    def after_limit_modifiers(self, expression: exp.Expression) -> t.List[str]:
      +1561        locks = self.expressions(expression, key="locks", sep=" ")
      +1562        locks = f" {locks}" if locks else ""
      +1563        return [locks, self.sql(expression, "sample")]
       
      @@ -8555,21 +8625,21 @@ Default: True
      -
      1566    def select_sql(self, expression: exp.Select) -> str:
      -1567        hint = self.sql(expression, "hint")
      -1568        distinct = self.sql(expression, "distinct")
      -1569        distinct = f" {distinct}" if distinct else ""
      -1570        kind = expression.args.get("kind")
      -1571        kind = f" AS {kind}" if kind else ""
      -1572        expressions = self.expressions(expression)
      -1573        expressions = f"{self.sep()}{expressions}" if expressions else expressions
      -1574        sql = self.query_modifiers(
      -1575            expression,
      -1576            f"SELECT{hint}{distinct}{kind}{expressions}",
      -1577            self.sql(expression, "into", comment=False),
      -1578            self.sql(expression, "from", comment=False),
      -1579        )
      -1580        return self.prepend_ctes(expression, sql)
      +            
      1565    def select_sql(self, expression: exp.Select) -> str:
      +1566        hint = self.sql(expression, "hint")
      +1567        distinct = self.sql(expression, "distinct")
      +1568        distinct = f" {distinct}" if distinct else ""
      +1569        kind = expression.args.get("kind")
      +1570        kind = f" AS {kind}" if kind else ""
      +1571        expressions = self.expressions(expression)
      +1572        expressions = f"{self.sep()}{expressions}" if expressions else expressions
      +1573        sql = self.query_modifiers(
      +1574            expression,
      +1575            f"SELECT{hint}{distinct}{kind}{expressions}",
      +1576            self.sql(expression, "into", comment=False),
      +1577            self.sql(expression, "from", comment=False),
      +1578        )
      +1579        return self.prepend_ctes(expression, sql)
       
      @@ -8587,11 +8657,30 @@ Default: True
      -
      1582    def schema_sql(self, expression: exp.Schema) -> str:
      -1583        this = self.sql(expression, "this")
      -1584        this = f"{this} " if this else ""
      -1585        sql = f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}"
      -1586        return f"{this}{sql}"
      +            
      1581    def schema_sql(self, expression: exp.Schema) -> str:
      +1582        this = self.sql(expression, "this")
      +1583        this = f"{this} " if this else ""
      +1584        sql = self.schema_columns_sql(expression)
      +1585        return f"{this}{sql}"
      +
+            def schema_columns_sql(self, expression: sqlglot.expressions.Schema) -> str:
      1587    def schema_columns_sql(self, expression: exp.Schema) -> str:
      +1588        return f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}"
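
Factoring schema_columns_sql out of schema_sql gives dialects a single override point for the parenthesized column list. A hypothetical subclass as a sketch (the class name and the injected comment are made up; instantiating the base Generator directly is assumed to be enough for a quick demo):

    from sqlglot import exp, parse_one
    from sqlglot.generator import Generator

    class CommentedSchemaGenerator(Generator):
        def schema_columns_sql(self, expression: exp.Schema) -> str:
            # prefix the column list with a comment, purely for illustration
            return "(/* columns */ " + self.expressions(expression, flat=True) + ")"

    print(CommentedSchemaGenerator().generate(parse_one("CREATE TABLE t (a INT, b TEXT)")))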
       
      @@ -8609,12 +8698,12 @@ Default: True
      -
      1588    def star_sql(self, expression: exp.Star) -> str:
      -1589        except_ = self.expressions(expression, key="except", flat=True)
      -1590        except_ = f"{self.seg(self.STAR_MAPPING['except'])} ({except_})" if except_ else ""
      -1591        replace = self.expressions(expression, key="replace", flat=True)
      -1592        replace = f"{self.seg(self.STAR_MAPPING['replace'])} ({replace})" if replace else ""
      -1593        return f"*{except_}{replace}"
      +            
      1590    def star_sql(self, expression: exp.Star) -> str:
      +1591        except_ = self.expressions(expression, key="except", flat=True)
      +1592        except_ = f"{self.seg(self.STAR_MAPPING['except'])} ({except_})" if except_ else ""
      +1593        replace = self.expressions(expression, key="replace", flat=True)
      +1594        replace = f"{self.seg(self.STAR_MAPPING['replace'])} ({replace})" if replace else ""
      +1595        return f"*{except_}{replace}"
       
      @@ -8632,10 +8721,10 @@ Default: True
      -
      1595    def parameter_sql(self, expression: exp.Parameter) -> str:
      -1596        this = self.sql(expression, "this")
      -1597        this = f"{{{this}}}" if expression.args.get("wrapped") else f"{this}"
      -1598        return f"{self.PARAMETER_TOKEN}{this}"
      +            
      1597    def parameter_sql(self, expression: exp.Parameter) -> str:
      +1598        this = self.sql(expression, "this")
      +1599        this = f"{{{this}}}" if expression.args.get("wrapped") else f"{this}"
      +1600        return f"{self.PARAMETER_TOKEN}{this}"
       
      @@ -8653,12 +8742,12 @@ Default: True
      -
      1600    def sessionparameter_sql(self, expression: exp.SessionParameter) -> str:
      -1601        this = self.sql(expression, "this")
      -1602        kind = expression.text("kind")
      -1603        if kind:
      -1604            kind = f"{kind}."
      -1605        return f"@@{kind}{this}"
      +            
      1602    def sessionparameter_sql(self, expression: exp.SessionParameter) -> str:
      +1603        this = self.sql(expression, "this")
      +1604        kind = expression.text("kind")
      +1605        if kind:
      +1606            kind = f"{kind}."
      +1607        return f"@@{kind}{this}"
       
      @@ -8676,8 +8765,8 @@ Default: True
      -
      1607    def placeholder_sql(self, expression: exp.Placeholder) -> str:
      -1608        return f":{expression.name}" if expression.name else "?"
      +            
      1609    def placeholder_sql(self, expression: exp.Placeholder) -> str:
      +1610        return f":{expression.name}" if expression.name else "?"
       
      @@ -8695,15 +8784,15 @@ Default: True
      -
      1610    def subquery_sql(self, expression: exp.Subquery, sep: str = " AS ") -> str:
      -1611        alias = self.sql(expression, "alias")
      -1612        alias = f"{sep}{alias}" if alias else ""
      -1613
      -1614        pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
      -1615        pivots = f" {pivots}" if pivots else ""
      -1616
      -1617        sql = self.query_modifiers(expression, self.wrap(expression), alias, pivots)
      -1618        return self.prepend_ctes(expression, sql)
      +            
      1612    def subquery_sql(self, expression: exp.Subquery, sep: str = " AS ") -> str:
      +1613        alias = self.sql(expression, "alias")
      +1614        alias = f"{sep}{alias}" if alias else ""
      +1615
      +1616        pivots = self.expressions(expression, key="pivots", sep=" ", flat=True)
      +1617        pivots = f" {pivots}" if pivots else ""
      +1618
      +1619        sql = self.query_modifiers(expression, self.wrap(expression), alias, pivots)
      +1620        return self.prepend_ctes(expression, sql)
       
      @@ -8721,9 +8810,9 @@ Default: True
      -
      1620    def qualify_sql(self, expression: exp.Qualify) -> str:
      -1621        this = self.indent(self.sql(expression, "this"))
      -1622        return f"{self.seg('QUALIFY')}{self.sep()}{this}"
      +            
      1622    def qualify_sql(self, expression: exp.Qualify) -> str:
      +1623        this = self.indent(self.sql(expression, "this"))
      +1624        return f"{self.seg('QUALIFY')}{self.sep()}{this}"
       
      @@ -8741,11 +8830,11 @@ Default: True
      -
      1624    def union_sql(self, expression: exp.Union) -> str:
      -1625        return self.prepend_ctes(
      -1626            expression,
      -1627            self.set_operation(expression, self.union_op(expression)),
      -1628        )
      +            
      1626    def union_sql(self, expression: exp.Union) -> str:
      +1627        return self.prepend_ctes(
      +1628            expression,
      +1629            self.set_operation(expression, self.union_op(expression)),
      +1630        )
       
      @@ -8763,10 +8852,10 @@ Default: True
      -
      1630    def union_op(self, expression: exp.Union) -> str:
      -1631        kind = " DISTINCT" if self.EXPLICIT_UNION else ""
      -1632        kind = kind if expression.args.get("distinct") else " ALL"
      -1633        return f"UNION{kind}"
      +            
      1632    def union_op(self, expression: exp.Union) -> str:
      +1633        kind = " DISTINCT" if self.EXPLICIT_UNION else ""
      +1634        kind = kind if expression.args.get("distinct") else " ALL"
      +1635        return f"UNION{kind}"
       
      @@ -8784,19 +8873,19 @@ Default: True
      -
      1635    def unnest_sql(self, expression: exp.Unnest) -> str:
      -1636        args = self.expressions(expression, flat=True)
      -1637        alias = expression.args.get("alias")
      -1638        if alias and self.unnest_column_only:
      -1639            columns = alias.columns
      -1640            alias = self.sql(columns[0]) if columns else ""
      -1641        else:
      -1642            alias = self.sql(expression, "alias")
      -1643        alias = f" AS {alias}" if alias else alias
      -1644        ordinality = " WITH ORDINALITY" if expression.args.get("ordinality") else ""
      -1645        offset = expression.args.get("offset")
      -1646        offset = f" WITH OFFSET AS {self.sql(offset)}" if offset else ""
      -1647        return f"UNNEST({args}){ordinality}{alias}{offset}"
      +            
      1637    def unnest_sql(self, expression: exp.Unnest) -> str:
      +1638        args = self.expressions(expression, flat=True)
      +1639        alias = expression.args.get("alias")
      +1640        if alias and self.UNNEST_COLUMN_ONLY:
      +1641            columns = alias.columns
      +1642            alias = self.sql(columns[0]) if columns else ""
      +1643        else:
      +1644            alias = self.sql(expression, "alias")
      +1645        alias = f" AS {alias}" if alias else alias
      +1646        ordinality = " WITH ORDINALITY" if expression.args.get("ordinality") else ""
      +1647        offset = expression.args.get("offset")
      +1648        offset = f" WITH OFFSET AS {self.sql(offset)}" if offset else ""
      +1649        return f"UNNEST({args}){ordinality}{alias}{offset}"
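
One more hedged sketch, for the renamed UNNEST_COLUMN_ONLY setting, assuming BigQuery keeps its column-only UNNEST aliases; only the transpile calls are asserted, the printed text may differ:

    import sqlglot

    sql = "SELECT x FROM UNNEST([1, 2, 3]) AS x"
    print(sqlglot.transpile(sql, read="bigquery", write="bigquery")[0])
    print(sqlglot.transpile(sql, read="bigquery", write="presto")[0])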
       
      @@ -8814,9 +8903,9 @@ Default: True
      -
      1649    def where_sql(self, expression: exp.Where) -> str:
      -1650        this = self.indent(self.sql(expression, "this"))
      -1651        return f"{self.seg('WHERE')}{self.sep()}{this}"
      +            
      1651    def where_sql(self, expression: exp.Where) -> str:
      +1652        this = self.indent(self.sql(expression, "this"))
      +1653        return f"{self.seg('WHERE')}{self.sep()}{this}"
       
      @@ -8834,28 +8923,28 @@ Default: True
      -
      1653    def window_sql(self, expression: exp.Window) -> str:
      -1654        this = self.sql(expression, "this")
      -1655        partition = self.partition_by_sql(expression)
      -1656        order = expression.args.get("order")
      -1657        order = self.order_sql(order, flat=True) if order else ""
      -1658        spec = self.sql(expression, "spec")
      -1659        alias = self.sql(expression, "alias")
      -1660        over = self.sql(expression, "over") or "OVER"
      -1661
      -1662        this = f"{this} {'AS' if expression.arg_key == 'windows' else over}"
      +            
      1655    def window_sql(self, expression: exp.Window) -> str:
      +1656        this = self.sql(expression, "this")
      +1657        partition = self.partition_by_sql(expression)
      +1658        order = expression.args.get("order")
      +1659        order = self.order_sql(order, flat=True) if order else ""
      +1660        spec = self.sql(expression, "spec")
      +1661        alias = self.sql(expression, "alias")
      +1662        over = self.sql(expression, "over") or "OVER"
       1663
      -1664        first = expression.args.get("first")
      -1665        if first is None:
      -1666            first = ""
      -1667        else:
      -1668            first = "FIRST" if first else "LAST"
      -1669
      -1670        if not partition and not order and not spec and alias:
      -1671            return f"{this} {alias}"
      -1672
      -1673        args = " ".join(arg for arg in (alias, first, partition, order, spec) if arg)
      -1674        return f"{this} ({args})"
      +1664        this = f"{this} {'AS' if expression.arg_key == 'windows' else over}"
      +1665
      +1666        first = expression.args.get("first")
      +1667        if first is None:
      +1668            first = ""
      +1669        else:
      +1670            first = "FIRST" if first else "LAST"
      +1671
      +1672        if not partition and not order and not spec and alias:
      +1673            return f"{this} {alias}"
      +1674
      +1675        args = " ".join(arg for arg in (alias, first, partition, order, spec) if arg)
      +1676        return f"{this} ({args})"
       
      @@ -8873,9 +8962,9 @@ Default: True
      -
      1676    def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str:
      -1677        partition = self.expressions(expression, key="partition_by", flat=True)
      -1678        return f"PARTITION BY {partition}" if partition else ""
      +            
      1678    def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str:
      +1679        partition = self.expressions(expression, key="partition_by", flat=True)
      +1680        return f"PARTITION BY {partition}" if partition else ""
       
      @@ -8893,14 +8982,14 @@ Default: True
      -
      1680    def windowspec_sql(self, expression: exp.WindowSpec) -> str:
      -1681        kind = self.sql(expression, "kind")
      -1682        start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ")
      -1683        end = (
      -1684            csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ")
      -1685            or "CURRENT ROW"
      -1686        )
      -1687        return f"{kind} BETWEEN {start} AND {end}"
      +            
      1682    def windowspec_sql(self, expression: exp.WindowSpec) -> str:
      +1683        kind = self.sql(expression, "kind")
      +1684        start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ")
      +1685        end = (
      +1686            csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ")
      +1687            or "CURRENT ROW"
      +1688        )
      +1689        return f"{kind} BETWEEN {start} AND {end}"
       
      @@ -8918,10 +9007,10 @@ Default: True
      -
      1689    def withingroup_sql(self, expression: exp.WithinGroup) -> str:
      -1690        this = self.sql(expression, "this")
      -1691        expression_sql = self.sql(expression, "expression")[1:]  # order has a leading space
      -1692        return f"{this} WITHIN GROUP ({expression_sql})"
      +            
      1691    def withingroup_sql(self, expression: exp.WithinGroup) -> str:
      +1692        this = self.sql(expression, "this")
      +1693        expression_sql = self.sql(expression, "expression")[1:]  # order has a leading space
      +1694        return f"{this} WITHIN GROUP ({expression_sql})"
       
      @@ -8939,11 +9028,11 @@ Default: True
      -
      1694    def between_sql(self, expression: exp.Between) -> str:
      -1695        this = self.sql(expression, "this")
      -1696        low = self.sql(expression, "low")
      -1697        high = self.sql(expression, "high")
      -1698        return f"{this} BETWEEN {low} AND {high}"
      +            
      1696    def between_sql(self, expression: exp.Between) -> str:
      +1697        this = self.sql(expression, "this")
      +1698        low = self.sql(expression, "low")
      +1699        high = self.sql(expression, "high")
      +1700        return f"{this} BETWEEN {low} AND {high}"
       
      @@ -8961,11 +9050,11 @@ Default: True
      -
      1700    def bracket_sql(self, expression: exp.Bracket) -> str:
      -1701        expressions = apply_index_offset(expression.this, expression.expressions, self.index_offset)
      -1702        expressions_sql = ", ".join(self.sql(e) for e in expressions)
      -1703
      -1704        return f"{self.sql(expression, 'this')}[{expressions_sql}]"
      +            
      1702    def bracket_sql(self, expression: exp.Bracket) -> str:
      +1703        expressions = apply_index_offset(expression.this, expression.expressions, self.INDEX_OFFSET)
      +1704        expressions_sql = ", ".join(self.sql(e) for e in expressions)
      +1705
      +1706        return f"{self.sql(expression, 'this')}[{expressions_sql}]"
       
      @@ -8983,8 +9072,8 @@ Default: True
      -
      1706    def all_sql(self, expression: exp.All) -> str:
      -1707        return f"ALL {self.wrap(expression)}"
      +            
      1708    def all_sql(self, expression: exp.All) -> str:
      +1709        return f"ALL {self.wrap(expression)}"
       
      @@ -9002,11 +9091,11 @@ Default: True
      -
      1709    def any_sql(self, expression: exp.Any) -> str:
      -1710        this = self.sql(expression, "this")
      -1711        if isinstance(expression.this, exp.Subqueryable):
      -1712            this = self.wrap(this)
      -1713        return f"ANY {this}"
      +            
      1711    def any_sql(self, expression: exp.Any) -> str:
      +1712        this = self.sql(expression, "this")
      +1713        if isinstance(expression.this, exp.Subqueryable):
      +1714            this = self.wrap(this)
      +1715        return f"ANY {this}"
       
      @@ -9024,8 +9113,8 @@ Default: True
      -
      1715    def exists_sql(self, expression: exp.Exists) -> str:
      -1716        return f"EXISTS{self.wrap(expression)}"
      +            
      1717    def exists_sql(self, expression: exp.Exists) -> str:
      +1718        return f"EXISTS{self.wrap(expression)}"
       
      @@ -9043,25 +9132,25 @@ Default: True
      -
      1718    def case_sql(self, expression: exp.Case) -> str:
      -1719        this = self.sql(expression, "this")
      -1720        statements = [f"CASE {this}" if this else "CASE"]
      -1721
      -1722        for e in expression.args["ifs"]:
      -1723            statements.append(f"WHEN {self.sql(e, 'this')}")
      -1724            statements.append(f"THEN {self.sql(e, 'true')}")
      -1725
      -1726        default = self.sql(expression, "default")
      +            
      1720    def case_sql(self, expression: exp.Case) -> str:
      +1721        this = self.sql(expression, "this")
      +1722        statements = [f"CASE {this}" if this else "CASE"]
      +1723
      +1724        for e in expression.args["ifs"]:
      +1725            statements.append(f"WHEN {self.sql(e, 'this')}")
      +1726            statements.append(f"THEN {self.sql(e, 'true')}")
       1727
      -1728        if default:
      -1729            statements.append(f"ELSE {default}")
      -1730
      -1731        statements.append("END")
      +1728        default = self.sql(expression, "default")
      +1729
      +1730        if default:
      +1731            statements.append(f"ELSE {default}")
       1732
      -1733        if self.pretty and self.text_width(statements) > self._max_text_width:
      -1734            return self.indent("\n".join(statements), skip_first=True, skip_last=True)
      -1735
      -1736        return " ".join(statements)
      +1733        statements.append("END")
      +1734
      +1735        if self.pretty and self.text_width(statements) > self.max_text_width:
      +1736            return self.indent("\n".join(statements), skip_first=True, skip_last=True)
      +1737
      +1738        return " ".join(statements)
       
      @@ -9079,10 +9168,10 @@ Default: True
      -
      1738    def constraint_sql(self, expression: exp.Constraint) -> str:
      -1739        this = self.sql(expression, "this")
      -1740        expressions = self.expressions(expression, flat=True)
      -1741        return f"CONSTRAINT {this} {expressions}"
      +            
      1740    def constraint_sql(self, expression: exp.Constraint) -> str:
      +1741        this = self.sql(expression, "this")
      +1742        expressions = self.expressions(expression, flat=True)
      +1743        return f"CONSTRAINT {this} {expressions}"
       
      @@ -9100,10 +9189,10 @@ Default: True
      -
      1743    def nextvaluefor_sql(self, expression: exp.NextValueFor) -> str:
      -1744        order = expression.args.get("order")
      -1745        order = f" OVER ({self.order_sql(order, flat=True)})" if order else ""
      -1746        return f"NEXT VALUE FOR {self.sql(expression, 'this')}{order}"
      +            
      1745    def nextvaluefor_sql(self, expression: exp.NextValueFor) -> str:
      +1746        order = expression.args.get("order")
      +1747        order = f" OVER ({self.order_sql(order, flat=True)})" if order else ""
      +1748        return f"NEXT VALUE FOR {self.sql(expression, 'this')}{order}"
       
      @@ -9121,10 +9210,10 @@ Default: True
      -
      1748    def extract_sql(self, expression: exp.Extract) -> str:
      -1749        this = self.sql(expression, "this")
      -1750        expression_sql = self.sql(expression, "expression")
      -1751        return f"EXTRACT({this} FROM {expression_sql})"
      +            
      1750    def extract_sql(self, expression: exp.Extract) -> str:
      +1751        this = self.sql(expression, "this")
      +1752        expression_sql = self.sql(expression, "expression")
      +1753        return f"EXTRACT({this} FROM {expression_sql})"
       
      @@ -9142,36 +9231,37 @@ Default: True
      -
      1753    def trim_sql(self, expression: exp.Trim) -> str:
      -1754        trim_type = self.sql(expression, "position")
      -1755
      -1756        if trim_type == "LEADING":
      -1757            return self.func("LTRIM", expression.this)
      -1758        elif trim_type == "TRAILING":
      -1759            return self.func("RTRIM", expression.this)
      -1760        else:
      -1761            return self.func("TRIM", expression.this, expression.expression)
      +            
      1755    def trim_sql(self, expression: exp.Trim) -> str:
      +1756        trim_type = self.sql(expression, "position")
      +1757
      +1758        if trim_type == "LEADING":
      +1759            return self.func("LTRIM", expression.this)
      +1760        elif trim_type == "TRAILING":
      +1761            return self.func("RTRIM", expression.this)
      +1762        else:
      +1763            return self.func("TRIM", expression.this, expression.expression)
       
-    def concat_sql(self, expression: sqlglot.expressions.Concat) -> str:
+    def safeconcat_sql(self, expression: sqlglot.expressions.SafeConcat) -> str:
      1763    def concat_sql(self, expression: exp.Concat) -> str:
      -1764        if len(expression.expressions) == 1:
      -1765            return self.sql(expression.expressions[0])
      -1766        return self.function_fallback_sql(expression)
      +    
      +            
      1765    def safeconcat_sql(self, expression: exp.SafeConcat) -> str:
      +1766        expressions = expression.expressions
      +1767        if self.STRICT_STRING_CONCAT:
      +1768            expressions = (exp.cast(e, "text") for e in expressions)
      +1769        return self.func("CONCAT", *expressions)
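A minimal usage sketch (not part of the patch) for the new safeconcat_sql. It assumes the STRICT_STRING_CONCAT flag read by the method can simply be set on a Generator subclass; exp.SafeConcat, exp.column and exp.Literal are real sqlglot names.

    from sqlglot import exp
    from sqlglot.generator import Generator

    class StrictGenerator(Generator):
        # Assumption: overriding the flag on a subclass is enough for this method.
        STRICT_STRING_CONCAT = True

    node = exp.SafeConcat(expressions=[exp.column("a"), exp.Literal.number(1)])
    # With strict concatenation, each argument should be cast to text first,
    # rendering roughly CONCAT(CAST(a AS TEXT), CAST(1 AS TEXT)).
    print(StrictGenerator().generate(node))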
       
      @@ -9189,9 +9279,9 @@ Default: True
      -
      1768    def check_sql(self, expression: exp.Check) -> str:
      -1769        this = self.sql(expression, key="this")
      -1770        return f"CHECK ({this})"
      +            
      1771    def check_sql(self, expression: exp.Check) -> str:
      +1772        this = self.sql(expression, key="this")
      +1773        return f"CHECK ({this})"
       
      @@ -9209,15 +9299,15 @@ Default: True
      -
      1772    def foreignkey_sql(self, expression: exp.ForeignKey) -> str:
      -1773        expressions = self.expressions(expression, flat=True)
      -1774        reference = self.sql(expression, "reference")
      -1775        reference = f" {reference}" if reference else ""
      -1776        delete = self.sql(expression, "delete")
      -1777        delete = f" ON DELETE {delete}" if delete else ""
      -1778        update = self.sql(expression, "update")
      -1779        update = f" ON UPDATE {update}" if update else ""
      -1780        return f"FOREIGN KEY ({expressions}){reference}{delete}{update}"
      +            
      1775    def foreignkey_sql(self, expression: exp.ForeignKey) -> str:
      +1776        expressions = self.expressions(expression, flat=True)
      +1777        reference = self.sql(expression, "reference")
      +1778        reference = f" {reference}" if reference else ""
      +1779        delete = self.sql(expression, "delete")
      +1780        delete = f" ON DELETE {delete}" if delete else ""
      +1781        update = self.sql(expression, "update")
      +1782        update = f" ON UPDATE {update}" if update else ""
      +1783        return f"FOREIGN KEY ({expressions}){reference}{delete}{update}"
       
      @@ -9235,11 +9325,11 @@ Default: True
      -
      1782    def primarykey_sql(self, expression: exp.ForeignKey) -> str:
      -1783        expressions = self.expressions(expression, flat=True)
      -1784        options = self.expressions(expression, key="options", flat=True, sep=" ")
      -1785        options = f" {options}" if options else ""
      -1786        return f"PRIMARY KEY ({expressions}){options}"
      +            
      1785    def primarykey_sql(self, expression: exp.ForeignKey) -> str:
      +1786        expressions = self.expressions(expression, flat=True)
      +1787        options = self.expressions(expression, key="options", flat=True, sep=" ")
      +1788        options = f" {options}" if options else ""
      +1789        return f"PRIMARY KEY ({expressions}){options}"
       
      @@ -9257,10 +9347,8 @@ Default: True
      -
      1788    def if_sql(self, expression: exp.If) -> str:
      -1789        return self.case_sql(
      -1790            exp.Case(ifs=[expression.copy()], default=expression.args.get("false"))
      -1791        )
      +            
      1791    def if_sql(self, expression: exp.If) -> str:
      +1792        return self.case_sql(exp.Case(ifs=[expression], default=expression.args.get("false")))
       
      @@ -9278,10 +9366,10 @@ Default: True
      -
      1793    def matchagainst_sql(self, expression: exp.MatchAgainst) -> str:
      -1794        modifier = expression.args.get("modifier")
      -1795        modifier = f" {modifier}" if modifier else ""
      -1796        return f"{self.func('MATCH', *expression.expressions)} AGAINST({self.sql(expression, 'this')}{modifier})"
      +            
      1794    def matchagainst_sql(self, expression: exp.MatchAgainst) -> str:
      +1795        modifier = expression.args.get("modifier")
      +1796        modifier = f" {modifier}" if modifier else ""
      +1797        return f"{self.func('MATCH', *expression.expressions)} AGAINST({self.sql(expression, 'this')}{modifier})"
       
      @@ -9299,8 +9387,8 @@ Default: True
      -
      1798    def jsonkeyvalue_sql(self, expression: exp.JSONKeyValue) -> str:
      -1799        return f"{self.sql(expression, 'this')}: {self.sql(expression, 'expression')}"
      +            
      1799    def jsonkeyvalue_sql(self, expression: exp.JSONKeyValue) -> str:
      +1800        return f"{self.sql(expression, 'this')}: {self.sql(expression, 'expression')}"
       
      @@ -9318,8 +9406,7 @@ Default: True
      -
      1801    def jsonobject_sql(self, expression: exp.JSONObject) -> str:
      -1802        expressions = self.expressions(expression)
      +            
      1802    def jsonobject_sql(self, expression: exp.JSONObject) -> str:
       1803        null_handling = expression.args.get("null_handling")
       1804        null_handling = f" {null_handling}" if null_handling else ""
       1805        unique_keys = expression.args.get("unique_keys")
      @@ -9332,7 +9419,11 @@ Default: True
       1812        format_json = " FORMAT JSON" if expression.args.get("format_json") else ""
       1813        encoding = self.sql(expression, "encoding")
       1814        encoding = f" ENCODING {encoding}" if encoding else ""
      -1815        return f"JSON_OBJECT({expressions}{null_handling}{unique_keys}{return_type}{format_json}{encoding})"
      +1815        return self.func(
      +1816            "JSON_OBJECT",
      +1817            *expression.expressions,
      +1818            suffix=f"{null_handling}{unique_keys}{return_type}{format_json}{encoding})",
      +1819        )
       
      @@ -9350,13 +9441,13 @@ Default: True
      -
      1817    def openjsoncolumndef_sql(self, expression: exp.OpenJSONColumnDef) -> str:
      -1818        this = self.sql(expression, "this")
      -1819        kind = self.sql(expression, "kind")
      -1820        path = self.sql(expression, "path")
      -1821        path = f" {path}" if path else ""
      -1822        as_json = " AS JSON" if expression.args.get("as_json") else ""
      -1823        return f"{this} {kind}{path}{as_json}"
      +            
      1821    def openjsoncolumndef_sql(self, expression: exp.OpenJSONColumnDef) -> str:
      +1822        this = self.sql(expression, "this")
      +1823        kind = self.sql(expression, "kind")
      +1824        path = self.sql(expression, "path")
      +1825        path = f" {path}" if path else ""
      +1826        as_json = " AS JSON" if expression.args.get("as_json") else ""
      +1827        return f"{this} {kind}{path}{as_json}"
       
      @@ -9374,17 +9465,17 @@ Default: True
      -
      1825    def openjson_sql(self, expression: exp.OpenJSON) -> str:
      -1826        this = self.sql(expression, "this")
      -1827        path = self.sql(expression, "path")
      -1828        path = f", {path}" if path else ""
      -1829        expressions = self.expressions(expression)
      -1830        with_ = (
      -1831            f" WITH ({self.seg(self.indent(expressions), sep='')}{self.seg(')', sep='')}"
      -1832            if expressions
      -1833            else ""
      -1834        )
      -1835        return f"OPENJSON({this}{path}){with_}"
      +            
      1829    def openjson_sql(self, expression: exp.OpenJSON) -> str:
      +1830        this = self.sql(expression, "this")
      +1831        path = self.sql(expression, "path")
      +1832        path = f", {path}" if path else ""
      +1833        expressions = self.expressions(expression)
      +1834        with_ = (
      +1835            f" WITH ({self.seg(self.indent(expressions), sep='')}{self.seg(')', sep='')}"
      +1836            if expressions
      +1837            else ""
      +1838        )
      +1839        return f"OPENJSON({this}{path}){with_}"
       
      @@ -9402,22 +9493,22 @@ Default: True
      -
      1837    def in_sql(self, expression: exp.In) -> str:
      -1838        query = expression.args.get("query")
      -1839        unnest = expression.args.get("unnest")
      -1840        field = expression.args.get("field")
      -1841        is_global = " GLOBAL" if expression.args.get("is_global") else ""
      -1842
      -1843        if query:
      -1844            in_sql = self.wrap(query)
      -1845        elif unnest:
      -1846            in_sql = self.in_unnest_op(unnest)
      -1847        elif field:
      -1848            in_sql = self.sql(field)
      -1849        else:
      -1850            in_sql = f"({self.expressions(expression, flat=True)})"
      -1851
      -1852        return f"{self.sql(expression, 'this')}{is_global} IN {in_sql}"
      +            
      1841    def in_sql(self, expression: exp.In) -> str:
      +1842        query = expression.args.get("query")
      +1843        unnest = expression.args.get("unnest")
      +1844        field = expression.args.get("field")
      +1845        is_global = " GLOBAL" if expression.args.get("is_global") else ""
      +1846
      +1847        if query:
      +1848            in_sql = self.wrap(query)
      +1849        elif unnest:
      +1850            in_sql = self.in_unnest_op(unnest)
      +1851        elif field:
      +1852            in_sql = self.sql(field)
      +1853        else:
      +1854            in_sql = f"({self.expressions(expression, flat=True)})"
      +1855
      +1856        return f"{self.sql(expression, 'this')}{is_global} IN {in_sql}"
       
      @@ -9435,8 +9526,8 @@ Default: True
      -
      1854    def in_unnest_op(self, unnest: exp.Unnest) -> str:
      -1855        return f"(SELECT {self.sql(unnest)})"
      +            
      1858    def in_unnest_op(self, unnest: exp.Unnest) -> str:
      +1859        return f"(SELECT {self.sql(unnest)})"
       
      @@ -9454,22 +9545,22 @@ Default: True
      -
      1857    def interval_sql(self, expression: exp.Interval) -> str:
      -1858        unit = self.sql(expression, "unit")
      -1859        if not self.INTERVAL_ALLOWS_PLURAL_FORM:
      -1860            unit = self.TIME_PART_SINGULARS.get(unit.lower(), unit)
      -1861        unit = f" {unit}" if unit else ""
      -1862
      -1863        if self.SINGLE_STRING_INTERVAL:
      -1864            this = expression.this.name if expression.this else ""
      -1865            return f"INTERVAL '{this}{unit}'" if this else f"INTERVAL{unit}"
      +            
      1861    def interval_sql(self, expression: exp.Interval) -> str:
      +1862        unit = self.sql(expression, "unit")
      +1863        if not self.INTERVAL_ALLOWS_PLURAL_FORM:
      +1864            unit = self.TIME_PART_SINGULARS.get(unit.lower(), unit)
      +1865        unit = f" {unit}" if unit else ""
       1866
      -1867        this = self.sql(expression, "this")
      -1868        if this:
      -1869            unwrapped = isinstance(expression.this, self.UNWRAPPED_INTERVAL_VALUES)
      -1870            this = f" {this}" if unwrapped else f" ({this})"
      -1871
      -1872        return f"INTERVAL{this}{unit}"
      +1867        if self.SINGLE_STRING_INTERVAL:
      +1868            this = expression.this.name if expression.this else ""
      +1869            return f"INTERVAL '{this}{unit}'" if this else f"INTERVAL{unit}"
      +1870
      +1871        this = self.sql(expression, "this")
      +1872        if this:
      +1873            unwrapped = isinstance(expression.this, self.UNWRAPPED_INTERVAL_VALUES)
      +1874            this = f" {this}" if unwrapped else f" ({this})"
      +1875
      +1876        return f"INTERVAL{this}{unit}"
       
      @@ -9487,8 +9578,8 @@ Default: True
      -
      1874    def return_sql(self, expression: exp.Return) -> str:
      -1875        return f"RETURN {self.sql(expression, 'this')}"
      +            
      1878    def return_sql(self, expression: exp.Return) -> str:
      +1879        return f"RETURN {self.sql(expression, 'this')}"
       
      @@ -9506,13 +9597,13 @@ Default: True
      -
      1877    def reference_sql(self, expression: exp.Reference) -> str:
      -1878        this = self.sql(expression, "this")
      -1879        expressions = self.expressions(expression, flat=True)
      -1880        expressions = f"({expressions})" if expressions else ""
      -1881        options = self.expressions(expression, key="options", flat=True, sep=" ")
      -1882        options = f" {options}" if options else ""
      -1883        return f"REFERENCES {this}{expressions}{options}"
      +            
      1881    def reference_sql(self, expression: exp.Reference) -> str:
      +1882        this = self.sql(expression, "this")
      +1883        expressions = self.expressions(expression, flat=True)
      +1884        expressions = f"({expressions})" if expressions else ""
      +1885        options = self.expressions(expression, key="options", flat=True, sep=" ")
      +1886        options = f" {options}" if options else ""
      +1887        return f"REFERENCES {this}{expressions}{options}"
       
      @@ -9530,8 +9621,8 @@ Default: True
      -
      1885    def anonymous_sql(self, expression: exp.Anonymous) -> str:
      -1886        return self.func(expression.name, *expression.expressions)
      +            
      1889    def anonymous_sql(self, expression: exp.Anonymous) -> str:
      +1890        return self.func(expression.name, *expression.expressions)
       
      @@ -9549,14 +9640,14 @@ Default: True
      -
      1888    def paren_sql(self, expression: exp.Paren) -> str:
      -1889        if isinstance(expression.unnest(), exp.Select):
      -1890            sql = self.wrap(expression)
      -1891        else:
      -1892            sql = self.seg(self.indent(self.sql(expression, "this")), sep="")
      -1893            sql = f"({sql}{self.seg(')', sep='')}"
      -1894
      -1895        return self.prepend_ctes(expression, sql)
      +            
      1892    def paren_sql(self, expression: exp.Paren) -> str:
      +1893        if isinstance(expression.unnest(), exp.Select):
      +1894            sql = self.wrap(expression)
      +1895        else:
      +1896            sql = self.seg(self.indent(self.sql(expression, "this")), sep="")
      +1897            sql = f"({sql}{self.seg(')', sep='')}"
      +1898
      +1899        return self.prepend_ctes(expression, sql)
       
      @@ -9574,11 +9665,11 @@ Default: True
      -
      1897    def neg_sql(self, expression: exp.Neg) -> str:
      -1898        # This makes sure we don't convert "- - 5" to "--5", which is a comment
      -1899        this_sql = self.sql(expression, "this")
      -1900        sep = " " if this_sql[0] == "-" else ""
      -1901        return f"-{sep}{this_sql}"
      +            
      1901    def neg_sql(self, expression: exp.Neg) -> str:
      +1902        # This makes sure we don't convert "- - 5" to "--5", which is a comment
      +1903        this_sql = self.sql(expression, "this")
      +1904        sep = " " if this_sql[0] == "-" else ""
      +1905        return f"-{sep}{this_sql}"
       
      @@ -9596,8 +9687,8 @@ Default: True
      -
      1903    def not_sql(self, expression: exp.Not) -> str:
      -1904        return f"NOT {self.sql(expression, 'this')}"
      +            
      1907    def not_sql(self, expression: exp.Not) -> str:
      +1908        return f"NOT {self.sql(expression, 'this')}"
       
      @@ -9615,10 +9706,10 @@ Default: True
      -
      1906    def alias_sql(self, expression: exp.Alias) -> str:
      -1907        alias = self.sql(expression, "alias")
      -1908        alias = f" AS {alias}" if alias else ""
      -1909        return f"{self.sql(expression, 'this')}{alias}"
      +            
      1910    def alias_sql(self, expression: exp.Alias) -> str:
      +1911        alias = self.sql(expression, "alias")
      +1912        alias = f" AS {alias}" if alias else ""
      +1913        return f"{self.sql(expression, 'this')}{alias}"
       
      @@ -9636,8 +9727,8 @@ Default: True
      -
      1911    def aliases_sql(self, expression: exp.Aliases) -> str:
      -1912        return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
      +            
      1915    def aliases_sql(self, expression: exp.Aliases) -> str:
      +1916        return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
       
      @@ -9655,10 +9746,10 @@ Default: True
      -
      1914    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
      -1915        this = self.sql(expression, "this")
      -1916        zone = self.sql(expression, "zone")
      -1917        return f"{this} AT TIME ZONE {zone}"
      +            
      1918    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
      +1919        this = self.sql(expression, "this")
      +1920        zone = self.sql(expression, "zone")
      +1921        return f"{this} AT TIME ZONE {zone}"
       
      @@ -9676,8 +9767,8 @@ Default: True
      -
      1919    def add_sql(self, expression: exp.Add) -> str:
      -1920        return self.binary(expression, "+")
      +            
      1923    def add_sql(self, expression: exp.Add) -> str:
      +1924        return self.binary(expression, "+")
       
      @@ -9695,8 +9786,8 @@ Default: True
      -
      1922    def and_sql(self, expression: exp.And) -> str:
      -1923        return self.connector_sql(expression, "AND")
      +            
      1926    def and_sql(self, expression: exp.And) -> str:
      +1927        return self.connector_sql(expression, "AND")
       
      @@ -9714,17 +9805,17 @@ Default: True
      -
      1925    def connector_sql(self, expression: exp.Connector, op: str) -> str:
      -1926        if not self.pretty:
      -1927            return self.binary(expression, op)
      -1928
      -1929        sqls = tuple(
      -1930            self.maybe_comment(self.sql(e), e, e.parent.comments or []) if i != 1 else self.sql(e)
      -1931            for i, e in enumerate(expression.flatten(unnest=False))
      -1932        )
      -1933
      -1934        sep = "\n" if self.text_width(sqls) > self._max_text_width else " "
      -1935        return f"{sep}{op} ".join(sqls)
      +            
      1929    def connector_sql(self, expression: exp.Connector, op: str) -> str:
      +1930        if not self.pretty:
      +1931            return self.binary(expression, op)
      +1932
      +1933        sqls = tuple(
      +1934            self.maybe_comment(self.sql(e), e, e.parent.comments or []) if i != 1 else self.sql(e)
      +1935            for i, e in enumerate(expression.flatten(unnest=False))
      +1936        )
      +1937
      +1938        sep = "\n" if self.text_width(sqls) > self.max_text_width else " "
      +1939        return f"{sep}{op} ".join(sqls)
       
      @@ -9742,8 +9833,8 @@ Default: True
      -
      1937    def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str:
      -1938        return self.binary(expression, "&")
      +            
      1941    def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str:
      +1942        return self.binary(expression, "&")
       
      @@ -9761,8 +9852,8 @@ Default: True
      -
      1940    def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str:
      -1941        return self.binary(expression, "<<")
      +            
      1944    def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str:
      +1945        return self.binary(expression, "<<")
       
      @@ -9780,8 +9871,8 @@ Default: True
      -
      1943    def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str:
      -1944        return f"~{self.sql(expression, 'this')}"
      +            
      1947    def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str:
      +1948        return f"~{self.sql(expression, 'this')}"
       
      @@ -9799,8 +9890,8 @@ Default: True
      -
      1946    def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str:
      -1947        return self.binary(expression, "|")
      +            
      1950    def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str:
      +1951        return self.binary(expression, "|")
       
      @@ -9818,8 +9909,8 @@ Default: True
      -
      1949    def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str:
      -1950        return self.binary(expression, ">>")
      +            
      1953    def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str:
      +1954        return self.binary(expression, ">>")
       
      @@ -9837,8 +9928,8 @@ Default: True
      -
      1952    def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str:
      -1953        return self.binary(expression, "^")
      +            
      1956    def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str:
      +1957        return self.binary(expression, "^")
       
      @@ -9856,8 +9947,8 @@ Default: True
      -
      1955    def cast_sql(self, expression: exp.Cast) -> str:
      -1956        return f"CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
      +            
      1959    def cast_sql(self, expression: exp.Cast) -> str:
      +1960        return f"CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
       
      @@ -9875,9 +9966,9 @@ Default: True
      -
      1958    def currentdate_sql(self, expression: exp.CurrentDate) -> str:
      -1959        zone = self.sql(expression, "this")
      -1960        return f"CURRENT_DATE({zone})" if zone else "CURRENT_DATE"
      +            
      1962    def currentdate_sql(self, expression: exp.CurrentDate) -> str:
      +1963        zone = self.sql(expression, "this")
      +1964        return f"CURRENT_DATE({zone})" if zone else "CURRENT_DATE"
       
      @@ -9895,8 +9986,8 @@ Default: True
      -
      1962    def collate_sql(self, expression: exp.Collate) -> str:
      -1963        return self.binary(expression, "COLLATE")
      +            
      1966    def collate_sql(self, expression: exp.Collate) -> str:
      +1967        return self.binary(expression, "COLLATE")
       
      @@ -9914,8 +10005,8 @@ Default: True
      -
      1965    def command_sql(self, expression: exp.Command) -> str:
      -1966        return f"{self.sql(expression, 'this').upper()} {expression.text('expression').strip()}"
      +            
      1969    def command_sql(self, expression: exp.Command) -> str:
      +1970        return f"{self.sql(expression, 'this').upper()} {expression.text('expression').strip()}"
       
      @@ -9933,12 +10024,12 @@ Default: True
      -
      1968    def comment_sql(self, expression: exp.Comment) -> str:
      -1969        this = self.sql(expression, "this")
      -1970        kind = expression.args["kind"]
      -1971        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
      -1972        expression_sql = self.sql(expression, "expression")
      -1973        return f"COMMENT{exists_sql}ON {kind} {this} IS {expression_sql}"
      +            
      1972    def comment_sql(self, expression: exp.Comment) -> str:
      +1973        this = self.sql(expression, "this")
      +1974        kind = expression.args["kind"]
      +1975        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
      +1976        expression_sql = self.sql(expression, "expression")
      +1977        return f"COMMENT{exists_sql}ON {kind} {this} IS {expression_sql}"
       
      @@ -9956,16 +10047,16 @@ Default: True
      -
      1975    def mergetreettlaction_sql(self, expression: exp.MergeTreeTTLAction) -> str:
      -1976        this = self.sql(expression, "this")
      -1977        delete = " DELETE" if expression.args.get("delete") else ""
      -1978        recompress = self.sql(expression, "recompress")
      -1979        recompress = f" RECOMPRESS {recompress}" if recompress else ""
      -1980        to_disk = self.sql(expression, "to_disk")
      -1981        to_disk = f" TO DISK {to_disk}" if to_disk else ""
      -1982        to_volume = self.sql(expression, "to_volume")
      -1983        to_volume = f" TO VOLUME {to_volume}" if to_volume else ""
      -1984        return f"{this}{delete}{recompress}{to_disk}{to_volume}"
      +            
      1979    def mergetreettlaction_sql(self, expression: exp.MergeTreeTTLAction) -> str:
      +1980        this = self.sql(expression, "this")
      +1981        delete = " DELETE" if expression.args.get("delete") else ""
      +1982        recompress = self.sql(expression, "recompress")
      +1983        recompress = f" RECOMPRESS {recompress}" if recompress else ""
      +1984        to_disk = self.sql(expression, "to_disk")
      +1985        to_disk = f" TO DISK {to_disk}" if to_disk else ""
      +1986        to_volume = self.sql(expression, "to_volume")
      +1987        to_volume = f" TO VOLUME {to_volume}" if to_volume else ""
      +1988        return f"{this}{delete}{recompress}{to_disk}{to_volume}"
       
      @@ -9983,16 +10074,16 @@ Default: True
      -
      1986    def mergetreettl_sql(self, expression: exp.MergeTreeTTL) -> str:
      -1987        where = self.sql(expression, "where")
      -1988        group = self.sql(expression, "group")
      -1989        aggregates = self.expressions(expression, key="aggregates")
      -1990        aggregates = self.seg("SET") + self.seg(aggregates) if aggregates else ""
      -1991
      -1992        if not (where or group or aggregates) and len(expression.expressions) == 1:
      -1993            return f"TTL {self.expressions(expression, flat=True)}"
      -1994
      -1995        return f"TTL{self.seg(self.expressions(expression))}{where}{group}{aggregates}"
      +            
      1990    def mergetreettl_sql(self, expression: exp.MergeTreeTTL) -> str:
      +1991        where = self.sql(expression, "where")
      +1992        group = self.sql(expression, "group")
      +1993        aggregates = self.expressions(expression, key="aggregates")
      +1994        aggregates = self.seg("SET") + self.seg(aggregates) if aggregates else ""
      +1995
      +1996        if not (where or group or aggregates) and len(expression.expressions) == 1:
      +1997            return f"TTL {self.expressions(expression, flat=True)}"
      +1998
      +1999        return f"TTL{self.seg(self.expressions(expression))}{where}{group}{aggregates}"
       
      @@ -10010,8 +10101,8 @@ Default: True
      -
      1997    def transaction_sql(self, expression: exp.Transaction) -> str:
      -1998        return "BEGIN"
      +            
      2001    def transaction_sql(self, expression: exp.Transaction) -> str:
      +2002        return "BEGIN"
       
      @@ -10029,12 +10120,12 @@ Default: True
      -
      2000    def commit_sql(self, expression: exp.Commit) -> str:
      -2001        chain = expression.args.get("chain")
      -2002        if chain is not None:
      -2003            chain = " AND CHAIN" if chain else " AND NO CHAIN"
      -2004
      -2005        return f"COMMIT{chain or ''}"
      +            
      2004    def commit_sql(self, expression: exp.Commit) -> str:
      +2005        chain = expression.args.get("chain")
      +2006        if chain is not None:
      +2007            chain = " AND CHAIN" if chain else " AND NO CHAIN"
      +2008
      +2009        return f"COMMIT{chain or ''}"
       
      @@ -10052,10 +10143,10 @@ Default: True
      -
      2007    def rollback_sql(self, expression: exp.Rollback) -> str:
      -2008        savepoint = expression.args.get("savepoint")
      -2009        savepoint = f" TO {savepoint}" if savepoint else ""
      -2010        return f"ROLLBACK{savepoint}"
      +            
      2011    def rollback_sql(self, expression: exp.Rollback) -> str:
      +2012        savepoint = expression.args.get("savepoint")
      +2013        savepoint = f" TO {savepoint}" if savepoint else ""
      +2014        return f"ROLLBACK{savepoint}"
       
      @@ -10073,25 +10164,25 @@ Default: True
      -
      2012    def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
      -2013        this = self.sql(expression, "this")
      -2014
      -2015        dtype = self.sql(expression, "dtype")
      -2016        if dtype:
      -2017            collate = self.sql(expression, "collate")
      -2018            collate = f" COLLATE {collate}" if collate else ""
      -2019            using = self.sql(expression, "using")
      -2020            using = f" USING {using}" if using else ""
      -2021            return f"ALTER COLUMN {this} TYPE {dtype}{collate}{using}"
      -2022
      -2023        default = self.sql(expression, "default")
      -2024        if default:
      -2025            return f"ALTER COLUMN {this} SET DEFAULT {default}"
      +            
      2016    def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
      +2017        this = self.sql(expression, "this")
      +2018
      +2019        dtype = self.sql(expression, "dtype")
      +2020        if dtype:
      +2021            collate = self.sql(expression, "collate")
      +2022            collate = f" COLLATE {collate}" if collate else ""
      +2023            using = self.sql(expression, "using")
      +2024            using = f" USING {using}" if using else ""
      +2025            return f"ALTER COLUMN {this} TYPE {dtype}{collate}{using}"
       2026
      -2027        if not expression.args.get("drop"):
      -2028            self.unsupported("Unsupported ALTER COLUMN syntax")
      -2029
      -2030        return f"ALTER COLUMN {this} DROP DEFAULT"
      +2027        default = self.sql(expression, "default")
      +2028        if default:
      +2029            return f"ALTER COLUMN {this} SET DEFAULT {default}"
      +2030
      +2031        if not expression.args.get("drop"):
      +2032            self.unsupported("Unsupported ALTER COLUMN syntax")
      +2033
      +2034        return f"ALTER COLUMN {this} DROP DEFAULT"
       
      @@ -10109,14 +10200,14 @@ Default: True
      -
      2032    def renametable_sql(self, expression: exp.RenameTable) -> str:
      -2033        if not self.RENAME_TABLE_WITH_DB:
      -2034            # Remove db from tables
      -2035            expression = expression.transform(
      -2036                lambda n: exp.table_(n.this) if isinstance(n, exp.Table) else n
      -2037            )
      -2038        this = self.sql(expression, "this")
      -2039        return f"RENAME TO {this}"
      +            
      2036    def renametable_sql(self, expression: exp.RenameTable) -> str:
      +2037        if not self.RENAME_TABLE_WITH_DB:
      +2038            # Remove db from tables
      +2039            expression = expression.transform(
      +2040                lambda n: exp.table_(n.this) if isinstance(n, exp.Table) else n
      +2041            )
      +2042        this = self.sql(expression, "this")
      +2043        return f"RENAME TO {this}"
       
      @@ -10134,20 +10225,20 @@ Default: True
      -
      2041    def altertable_sql(self, expression: exp.AlterTable) -> str:
      -2042        actions = expression.args["actions"]
      -2043
      -2044        if isinstance(actions[0], exp.ColumnDef):
      -2045            actions = self.expressions(expression, key="actions", prefix="ADD COLUMN ")
      -2046        elif isinstance(actions[0], exp.Schema):
      -2047            actions = self.expressions(expression, key="actions", prefix="ADD COLUMNS ")
      -2048        elif isinstance(actions[0], exp.Delete):
      -2049            actions = self.expressions(expression, key="actions", flat=True)
      -2050        else:
      -2051            actions = self.expressions(expression, key="actions")
      -2052
      -2053        exists = " IF EXISTS" if expression.args.get("exists") else ""
      -2054        return f"ALTER TABLE{exists} {self.sql(expression, 'this')} {actions}"
      +            
      2045    def altertable_sql(self, expression: exp.AlterTable) -> str:
      +2046        actions = expression.args["actions"]
      +2047
      +2048        if isinstance(actions[0], exp.ColumnDef):
      +2049            actions = self.expressions(expression, key="actions", prefix="ADD COLUMN ")
      +2050        elif isinstance(actions[0], exp.Schema):
      +2051            actions = self.expressions(expression, key="actions", prefix="ADD COLUMNS ")
      +2052        elif isinstance(actions[0], exp.Delete):
      +2053            actions = self.expressions(expression, key="actions", flat=True)
      +2054        else:
      +2055            actions = self.expressions(expression, key="actions")
      +2056
      +2057        exists = " IF EXISTS" if expression.args.get("exists") else ""
      +2058        return f"ALTER TABLE{exists} {self.sql(expression, 'this')} {actions}"
       
      @@ -10165,10 +10256,10 @@ Default: True
      -
      2056    def droppartition_sql(self, expression: exp.DropPartition) -> str:
      -2057        expressions = self.expressions(expression)
      -2058        exists = " IF EXISTS " if expression.args.get("exists") else " "
      -2059        return f"DROP{exists}{expressions}"
      +            
      2060    def droppartition_sql(self, expression: exp.DropPartition) -> str:
      +2061        expressions = self.expressions(expression)
      +2062        exists = " IF EXISTS " if expression.args.get("exists") else " "
      +2063        return f"DROP{exists}{expressions}"
       
      @@ -10186,16 +10277,16 @@ Default: True
      -
      2061    def addconstraint_sql(self, expression: exp.AddConstraint) -> str:
      -2062        this = self.sql(expression, "this")
      -2063        expression_ = self.sql(expression, "expression")
      -2064        add_constraint = f"ADD CONSTRAINT {this}" if this else "ADD"
      -2065
      -2066        enforced = expression.args.get("enforced")
      -2067        if enforced is not None:
      -2068            return f"{add_constraint} CHECK ({expression_}){' ENFORCED' if enforced else ''}"
      +            
      2065    def addconstraint_sql(self, expression: exp.AddConstraint) -> str:
      +2066        this = self.sql(expression, "this")
      +2067        expression_ = self.sql(expression, "expression")
      +2068        add_constraint = f"ADD CONSTRAINT {this}" if this else "ADD"
       2069
      -2070        return f"{add_constraint} {expression_}"
      +2070        enforced = expression.args.get("enforced")
      +2071        if enforced is not None:
      +2072            return f"{add_constraint} CHECK ({expression_}){' ENFORCED' if enforced else ''}"
      +2073
      +2074        return f"{add_constraint} {expression_}"
       
      @@ -10213,13 +10304,13 @@ Default: True
      -
      2072    def distinct_sql(self, expression: exp.Distinct) -> str:
      -2073        this = self.expressions(expression, flat=True)
      -2074        this = f" {this}" if this else ""
      -2075
      -2076        on = self.sql(expression, "on")
      -2077        on = f" ON {on}" if on else ""
      -2078        return f"DISTINCT{this}{on}"
      +            
      2076    def distinct_sql(self, expression: exp.Distinct) -> str:
      +2077        this = self.expressions(expression, flat=True)
      +2078        this = f" {this}" if this else ""
      +2079
      +2080        on = self.sql(expression, "on")
      +2081        on = f" ON {on}" if on else ""
      +2082        return f"DISTINCT{this}{on}"
       
      @@ -10237,8 +10328,8 @@ Default: True
      -
      2080    def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str:
      -2081        return f"{self.sql(expression, 'this')} IGNORE NULLS"
      +            
      2084    def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str:
      +2085        return f"{self.sql(expression, 'this')} IGNORE NULLS"
       
      @@ -10256,8 +10347,8 @@ Default: True
      -
      2083    def respectnulls_sql(self, expression: exp.RespectNulls) -> str:
      -2084        return f"{self.sql(expression, 'this')} RESPECT NULLS"
      +            
      2087    def respectnulls_sql(self, expression: exp.RespectNulls) -> str:
      +2088        return f"{self.sql(expression, 'this')} RESPECT NULLS"
       
      @@ -10275,13 +10366,13 @@ Default: True
      -
      2086    def intdiv_sql(self, expression: exp.IntDiv) -> str:
      -2087        return self.sql(
      -2088            exp.Cast(
      -2089                this=exp.Div(this=expression.this, expression=expression.expression),
      -2090                to=exp.DataType(this=exp.DataType.Type.INT),
      -2091            )
      -2092        )
      +            
      2090    def intdiv_sql(self, expression: exp.IntDiv) -> str:
      +2091        return self.sql(
      +2092            exp.Cast(
      +2093                this=exp.Div(this=expression.this, expression=expression.expression),
      +2094                to=exp.DataType(this=exp.DataType.Type.INT),
      +2095            )
      +2096        )
       
      @@ -10299,8 +10390,29 @@ Default: True
      -
      2094    def dpipe_sql(self, expression: exp.DPipe) -> str:
      -2095        return self.binary(expression, "||")
      +            
      2098    def dpipe_sql(self, expression: exp.DPipe) -> str:
      +2099        return self.binary(expression, "||")
+    def safedpipe_sql(self, expression: sqlglot.expressions.SafeDPipe) -> str:
      2101    def safedpipe_sql(self, expression: exp.SafeDPipe) -> str:
      +2102        if self.STRICT_STRING_CONCAT:
      +2103            return self.func("CONCAT", *(exp.cast(e, "text") for e in expression.flatten()))
      +2104        return self.dpipe_sql(expression)
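A similar sketch (again an assumption-laden illustration, not from the patch) for safedpipe_sql: with STRICT_STRING_CONCAT set, a SafeDPipe node is flattened into a CONCAT of text casts; otherwise it falls back to the plain || rendering of dpipe_sql.

    from sqlglot import exp
    from sqlglot.generator import Generator

    class StrictGenerator(Generator):
        STRICT_STRING_CONCAT = True  # assumption: the flag can be set on a subclass

    node = exp.SafeDPipe(this=exp.column("a"), expression=exp.Literal.number(1))
    # Expected to render roughly CONCAT(CAST(a AS TEXT), CAST(1 AS TEXT));
    # with the flag off, dpipe_sql would produce a || 1 instead.
    print(StrictGenerator().generate(node))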
       
      @@ -10318,8 +10430,8 @@ Default: True
      -
      2097    def div_sql(self, expression: exp.Div) -> str:
      -2098        return self.binary(expression, "/")
      +            
      2106    def div_sql(self, expression: exp.Div) -> str:
      +2107        return self.binary(expression, "/")
       
      @@ -10337,8 +10449,8 @@ Default: True
      -
      2100    def overlaps_sql(self, expression: exp.Overlaps) -> str:
      -2101        return self.binary(expression, "OVERLAPS")
      +            
      2109    def overlaps_sql(self, expression: exp.Overlaps) -> str:
      +2110        return self.binary(expression, "OVERLAPS")
       
      @@ -10356,8 +10468,8 @@ Default: True
      -
      2103    def distance_sql(self, expression: exp.Distance) -> str:
      -2104        return self.binary(expression, "<->")
      +            
      2112    def distance_sql(self, expression: exp.Distance) -> str:
      +2113        return self.binary(expression, "<->")
       
      @@ -10375,8 +10487,8 @@ Default: True
      -
      2106    def dot_sql(self, expression: exp.Dot) -> str:
      -2107        return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}"
      +            
      2115    def dot_sql(self, expression: exp.Dot) -> str:
      +2116        return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}"
       
      @@ -10394,8 +10506,8 @@ Default: True
      -
      2109    def eq_sql(self, expression: exp.EQ) -> str:
      -2110        return self.binary(expression, "=")
      +            
      2118    def eq_sql(self, expression: exp.EQ) -> str:
      +2119        return self.binary(expression, "=")
       
      @@ -10413,8 +10525,8 @@ Default: True
      -
      2112    def escape_sql(self, expression: exp.Escape) -> str:
      -2113        return self.binary(expression, "ESCAPE")
      +            
      2121    def escape_sql(self, expression: exp.Escape) -> str:
      +2122        return self.binary(expression, "ESCAPE")
       
      @@ -10432,8 +10544,8 @@ Default: True
      -
      2115    def glob_sql(self, expression: exp.Glob) -> str:
      -2116        return self.binary(expression, "GLOB")
      +            
      2124    def glob_sql(self, expression: exp.Glob) -> str:
      +2125        return self.binary(expression, "GLOB")
       
      @@ -10451,8 +10563,8 @@ Default: True
      -
      2118    def gt_sql(self, expression: exp.GT) -> str:
      -2119        return self.binary(expression, ">")
      +            
      2127    def gt_sql(self, expression: exp.GT) -> str:
      +2128        return self.binary(expression, ">")
       
      @@ -10470,8 +10582,8 @@ Default: True
      -
      2121    def gte_sql(self, expression: exp.GTE) -> str:
      -2122        return self.binary(expression, ">=")
      +            
      2130    def gte_sql(self, expression: exp.GTE) -> str:
      +2131        return self.binary(expression, ">=")
       
      @@ -10489,8 +10601,8 @@ Default: True
      -
      2124    def ilike_sql(self, expression: exp.ILike) -> str:
      -2125        return self.binary(expression, "ILIKE")
      +            
      2133    def ilike_sql(self, expression: exp.ILike) -> str:
      +2134        return self.binary(expression, "ILIKE")
       
      @@ -10508,8 +10620,8 @@ Default: True
      -
      2127    def ilikeany_sql(self, expression: exp.ILikeAny) -> str:
      -2128        return self.binary(expression, "ILIKE ANY")
      +            
      2136    def ilikeany_sql(self, expression: exp.ILikeAny) -> str:
      +2137        return self.binary(expression, "ILIKE ANY")
       
      @@ -10527,8 +10639,12 @@ Default: True
      -
      2130    def is_sql(self, expression: exp.Is) -> str:
      -2131        return self.binary(expression, "IS")
      +            
      2139    def is_sql(self, expression: exp.Is) -> str:
      +2140        if not self.IS_BOOL_ALLOWED and isinstance(expression.expression, exp.Boolean):
      +2141            return self.sql(
      +2142                expression.this if expression.expression.this else exp.not_(expression.this)
      +2143            )
      +2144        return self.binary(expression, "IS")
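A short sketch (not part of the patch) of the new IS_BOOL_ALLOWED branch in is_sql: when a dialect disallows IS TRUE / IS FALSE, the comparison collapses to the bare condition or its negation. The subclass flag override below is an assumption made for illustration.

    from sqlglot import exp
    from sqlglot.generator import Generator

    class NoIsBool(Generator):
        IS_BOOL_ALLOWED = False  # assumption: the flag can be overridden on a subclass

    node = exp.Is(this=exp.column("x"), expression=exp.false())
    # "x IS FALSE" should now come out as "NOT x"; "x IS TRUE" would become just "x".
    print(NoIsBool().generate(node))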
       
      @@ -10546,8 +10662,8 @@ Default: True
      -
      2133    def like_sql(self, expression: exp.Like) -> str:
      -2134        return self.binary(expression, "LIKE")
      +            
      2146    def like_sql(self, expression: exp.Like) -> str:
      +2147        return self.binary(expression, "LIKE")
       
      @@ -10565,8 +10681,8 @@ Default: True
      -
      2136    def likeany_sql(self, expression: exp.LikeAny) -> str:
      -2137        return self.binary(expression, "LIKE ANY")
      +            
      2149    def likeany_sql(self, expression: exp.LikeAny) -> str:
      +2150        return self.binary(expression, "LIKE ANY")
       
      @@ -10584,8 +10700,8 @@ Default: True
      -
      2139    def similarto_sql(self, expression: exp.SimilarTo) -> str:
      -2140        return self.binary(expression, "SIMILAR TO")
      +            
      2152    def similarto_sql(self, expression: exp.SimilarTo) -> str:
      +2153        return self.binary(expression, "SIMILAR TO")
       
      @@ -10603,8 +10719,8 @@ Default: True
      -
      2142    def lt_sql(self, expression: exp.LT) -> str:
      -2143        return self.binary(expression, "<")
      +            
      2155    def lt_sql(self, expression: exp.LT) -> str:
      +2156        return self.binary(expression, "<")
       
      @@ -10622,8 +10738,8 @@ Default: True
      -
      2145    def lte_sql(self, expression: exp.LTE) -> str:
      -2146        return self.binary(expression, "<=")
      +            
      2158    def lte_sql(self, expression: exp.LTE) -> str:
      +2159        return self.binary(expression, "<=")
       
      @@ -10641,8 +10757,8 @@ Default: True
      -
      2148    def mod_sql(self, expression: exp.Mod) -> str:
      -2149        return self.binary(expression, "%")
      +            
      2161    def mod_sql(self, expression: exp.Mod) -> str:
      +2162        return self.binary(expression, "%")
       
      @@ -10660,8 +10776,8 @@ Default: True
      -
      2151    def mul_sql(self, expression: exp.Mul) -> str:
      -2152        return self.binary(expression, "*")
      +            
      2164    def mul_sql(self, expression: exp.Mul) -> str:
      +2165        return self.binary(expression, "*")
       
      @@ -10679,8 +10795,8 @@ Default: True
      -
      2154    def neq_sql(self, expression: exp.NEQ) -> str:
      -2155        return self.binary(expression, "<>")
      +            
      2167    def neq_sql(self, expression: exp.NEQ) -> str:
      +2168        return self.binary(expression, "<>")
       
      @@ -10698,8 +10814,8 @@ Default: True
      -
      2157    def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str:
      -2158        return self.binary(expression, "IS NOT DISTINCT FROM")
      +            
      2170    def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str:
      +2171        return self.binary(expression, "IS NOT DISTINCT FROM")
       
      @@ -10717,8 +10833,8 @@ Default: True
      -
      2160    def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str:
      -2161        return self.binary(expression, "IS DISTINCT FROM")
      +            
      2173    def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str:
      +2174        return self.binary(expression, "IS DISTINCT FROM")
       
      @@ -10736,8 +10852,8 @@ Default: True
      -
      2163    def or_sql(self, expression: exp.Or) -> str:
      -2164        return self.connector_sql(expression, "OR")
      +            
      2176    def or_sql(self, expression: exp.Or) -> str:
      +2177        return self.connector_sql(expression, "OR")
       
      @@ -10755,8 +10871,8 @@ Default: True
      -
      2166    def slice_sql(self, expression: exp.Slice) -> str:
      -2167        return self.binary(expression, ":")
      +            
      2179    def slice_sql(self, expression: exp.Slice) -> str:
      +2180        return self.binary(expression, ":")
       
      @@ -10774,8 +10890,8 @@ Default: True
      -
      2169    def sub_sql(self, expression: exp.Sub) -> str:
      -2170        return self.binary(expression, "-")
      +            
      2182    def sub_sql(self, expression: exp.Sub) -> str:
      +2183        return self.binary(expression, "-")
       
      @@ -10793,8 +10909,8 @@ Default: True
      -
      2172    def trycast_sql(self, expression: exp.TryCast) -> str:
      -2173        return f"TRY_CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
      +            
      2185    def trycast_sql(self, expression: exp.TryCast) -> str:
      +2186        return f"TRY_CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
       
      @@ -10812,12 +10928,12 @@ Default: True
      -
      2175    def use_sql(self, expression: exp.Use) -> str:
      -2176        kind = self.sql(expression, "kind")
      -2177        kind = f" {kind}" if kind else ""
      -2178        this = self.sql(expression, "this")
      -2179        this = f" {this}" if this else ""
      -2180        return f"USE{kind}{this}"
      +            
      2188    def use_sql(self, expression: exp.Use) -> str:
      +2189        kind = self.sql(expression, "kind")
      +2190        kind = f" {kind}" if kind else ""
      +2191        this = self.sql(expression, "this")
      +2192        this = f" {this}" if this else ""
      +2193        return f"USE{kind}{this}"
       
      @@ -10835,9 +10951,9 @@ Default: True
      -
      2182    def binary(self, expression: exp.Binary, op: str) -> str:
      -2183        op = self.maybe_comment(op, comments=expression.comments)
      -2184        return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}"
      +            
      2195    def binary(self, expression: exp.Binary, op: str) -> str:
      +2196        op = self.maybe_comment(op, comments=expression.comments)
      +2197        return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}"
       
      @@ -10855,16 +10971,16 @@ Default: True
      -
      2186    def function_fallback_sql(self, expression: exp.Func) -> str:
      -2187        args = []
      -2188        for arg_value in expression.args.values():
      -2189            if isinstance(arg_value, list):
      -2190                for value in arg_value:
      -2191                    args.append(value)
      -2192            else:
      -2193                args.append(arg_value)
      -2194
      -2195        return self.func(expression.sql_name(), *args)
      +            
      2199    def function_fallback_sql(self, expression: exp.Func) -> str:
      +2200        args = []
      +2201        for arg_value in expression.args.values():
      +2202            if isinstance(arg_value, list):
      +2203                for value in arg_value:
      +2204                    args.append(value)
      +2205            else:
      +2206                args.append(arg_value)
      +2207
      +2208        return self.func(expression.sql_name(), *args)
       
      @@ -10876,14 +10992,20 @@ Default: True
-def func(self, name: str, *args: Union[str, sqlglot.expressions.Expression, NoneType]) -> str:
+def func(self, name: str, *args: Union[str, sqlglot.expressions.Expression, NoneType], prefix: str = '(', suffix: str = ')') -> str:
      -
      2197    def func(self, name: str, *args: t.Optional[exp.Expression | str]) -> str:
      -2198        return f"{self.normalize_func(name)}({self.format_args(*args)})"
      +            
      2210    def func(
      +2211        self,
      +2212        name: str,
      +2213        *args: t.Optional[exp.Expression | str],
      +2214        prefix: str = "(",
      +2215        suffix: str = ")",
      +2216    ) -> str:
      +2217        return f"{self.normalize_func(name)}{prefix}{self.format_args(*args)}{suffix}"
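A short sketch of the new prefix/suffix parameters on func(); the bracket call is a made-up illustration rather than an actual dialect usage:

    from sqlglot import exp
    from sqlglot.generator import Generator

    g = Generator()
    print(g.func("FOO", exp.column("a"), exp.column("b")))           # FOO(a, b)
    # Callers can now swap the surrounding delimiters:
    print(g.func("ARRAY", exp.column("a"), prefix="[", suffix="]"))  # ARRAY[a]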
       
      @@ -10901,11 +11023,11 @@ Default: True
      -
      2200    def format_args(self, *args: t.Optional[str | exp.Expression]) -> str:
      -2201        arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None)
      -2202        if self.pretty and self.text_width(arg_sqls) > self._max_text_width:
      -2203            return self.indent("\n" + f",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True)
      -2204        return ", ".join(arg_sqls)
      +            
      2219    def format_args(self, *args: t.Optional[str | exp.Expression]) -> str:
      +2220        arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None)
      +2221        if self.pretty and self.text_width(arg_sqls) > self.max_text_width:
      +2222            return self.indent("\n" + f",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True)
      +2223        return ", ".join(arg_sqls)
       
      @@ -10923,8 +11045,8 @@ Default: True
      -
      2206    def text_width(self, args: t.Iterable) -> int:
      -2207        return sum(len(arg) for arg in args)
      +            
      2225    def text_width(self, args: t.Iterable) -> int:
      +2226        return sum(len(arg) for arg in args)
       
      @@ -10942,8 +11064,10 @@ Default: True
      -
      2209    def format_time(self, expression: exp.Expression) -> t.Optional[str]:
      -2210        return format_time(self.sql(expression, "format"), self.time_mapping, self.time_trie)
      +            
      2228    def format_time(self, expression: exp.Expression) -> t.Optional[str]:
      +2229        return format_time(
      +2230            self.sql(expression, "format"), self.INVERSE_TIME_MAPPING, self.INVERSE_TIME_TRIE
      +2231        )
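format_time now reads the class-level INVERSE_TIME_MAPPING / INVERSE_TIME_TRIE constants instead of instance attributes; the underlying helper is unchanged. A tiny sketch of that helper, assuming it lives at sqlglot.time.format_time (the mapping below is made up for illustration):

    from sqlglot.time import format_time

    # Hypothetical mapping from strftime-style tokens to Java-style ones.
    mapping = {"%Y": "yyyy", "%m": "MM", "%d": "dd"}
    print(format_time("'%Y-%m-%d'", mapping))  # 'yyyy-MM-dd'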
       
      @@ -10961,47 +11085,47 @@ Default: True
      -
      2212    def expressions(
      -2213        self,
      -2214        expression: t.Optional[exp.Expression] = None,
      -2215        key: t.Optional[str] = None,
      -2216        sqls: t.Optional[t.List[str]] = None,
      -2217        flat: bool = False,
      -2218        indent: bool = True,
      -2219        sep: str = ", ",
      -2220        prefix: str = "",
      -2221    ) -> str:
      -2222        expressions = expression.args.get(key or "expressions") if expression else sqls
      -2223
      -2224        if not expressions:
      -2225            return ""
      -2226
      -2227        if flat:
      -2228            return sep.join(self.sql(e) for e in expressions)
      -2229
      -2230        num_sqls = len(expressions)
      -2231
      -2232        # These are calculated once in case we have the leading_comma / pretty option set, correspondingly
      -2233        pad = " " * self.pad
      -2234        stripped_sep = sep.strip()
      -2235
      -2236        result_sqls = []
      -2237        for i, e in enumerate(expressions):
      -2238            sql = self.sql(e, comment=False)
      -2239            comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else ""
      -2240
      -2241            if self.pretty:
      -2242                if self._leading_comma:
      -2243                    result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}")
      -2244                else:
      -2245                    result_sqls.append(
      -2246                        f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}"
      -2247                    )
      -2248            else:
      -2249                result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}")
      +            
      2233    def expressions(
      +2234        self,
      +2235        expression: t.Optional[exp.Expression] = None,
      +2236        key: t.Optional[str] = None,
      +2237        sqls: t.Optional[t.List[str]] = None,
      +2238        flat: bool = False,
      +2239        indent: bool = True,
      +2240        sep: str = ", ",
      +2241        prefix: str = "",
      +2242    ) -> str:
      +2243        expressions = expression.args.get(key or "expressions") if expression else sqls
      +2244
      +2245        if not expressions:
      +2246            return ""
      +2247
      +2248        if flat:
      +2249            return sep.join(self.sql(e) for e in expressions)
       2250
      -2251        result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls)
      -2252        return self.indent(result_sql, skip_first=False) if indent else result_sql
      +2251        num_sqls = len(expressions)
      +2252
      +2253        # These are calculated once in case we have the leading_comma / pretty option set, correspondingly
      +2254        pad = " " * self.pad
      +2255        stripped_sep = sep.strip()
      +2256
      +2257        result_sqls = []
      +2258        for i, e in enumerate(expressions):
      +2259            sql = self.sql(e, comment=False)
      +2260            comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else ""
      +2261
      +2262            if self.pretty:
      +2263                if self.leading_comma:
      +2264                    result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}")
      +2265                else:
      +2266                    result_sqls.append(
      +2267                        f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}"
      +2268                    )
      +2269            else:
      +2270                result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}")
      +2271
      +2272        result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls)
      +2273        return self.indent(result_sql, skip_first=False) if indent else result_sql
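Generator.expressions() drives pretty-printing of comma-separated lists; leading_comma (formerly the private _leading_comma) controls whether continuation lines start with the separator. A quick sketch, assuming both options are still accepted by the Generator constructor:

    import sqlglot
    from sqlglot.generator import Generator

    ast = sqlglot.parse_one("SELECT a, b, c FROM t")
    # One projection per line; commas lead each continuation line.
    print(Generator(pretty=True, leading_comma=True).generate(ast))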
       
      @@ -11019,12 +11143,12 @@ Default: True
      -
      2254    def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str:
      -2255        flat = flat or isinstance(expression.parent, exp.Properties)
      -2256        expressions_sql = self.expressions(expression, flat=flat)
      -2257        if flat:
      -2258            return f"{op} {expressions_sql}"
      -2259        return f"{self.seg(op)}{self.sep() if expressions_sql else ''}{expressions_sql}"
      +            
      2275    def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str:
      +2276        flat = flat or isinstance(expression.parent, exp.Properties)
      +2277        expressions_sql = self.expressions(expression, flat=flat)
      +2278        if flat:
      +2279            return f"{op} {expressions_sql}"
      +2280        return f"{self.seg(op)}{self.sep() if expressions_sql else ''}{expressions_sql}"
       
      @@ -11042,11 +11166,11 @@ Default: True
      -
      2261    def naked_property(self, expression: exp.Property) -> str:
      -2262        property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__)
      -2263        if not property_name:
      -2264            self.unsupported(f"Unsupported property {expression.__class__.__name__}")
      -2265        return f"{property_name} {self.sql(expression, 'this')}"
      +            
      2282    def naked_property(self, expression: exp.Property) -> str:
      +2283        property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__)
      +2284        if not property_name:
      +2285            self.unsupported(f"Unsupported property {expression.__class__.__name__}")
      +2286        return f"{property_name} {self.sql(expression, 'this')}"
       
      @@ -11064,12 +11188,12 @@ Default: True
      -
      2267    def set_operation(self, expression: exp.Expression, op: str) -> str:
      -2268        this = self.sql(expression, "this")
      -2269        op = self.seg(op)
      -2270        return self.query_modifiers(
      -2271            expression, f"{this}{op}{self.sep()}{self.sql(expression, 'expression')}"
      -2272        )
      +            
      2288    def set_operation(self, expression: exp.Expression, op: str) -> str:
      +2289        this = self.sql(expression, "this")
      +2290        op = self.seg(op)
      +2291        return self.query_modifiers(
      +2292            expression, f"{this}{op}{self.sep()}{self.sql(expression, 'expression')}"
      +2293        )
       
      @@ -11087,8 +11211,8 @@ Default: True
      -
      2274    def tag_sql(self, expression: exp.Tag) -> str:
      -2275        return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}"
      +            
      2295    def tag_sql(self, expression: exp.Tag) -> str:
      +2296        return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}"
       
      @@ -11106,8 +11230,8 @@ Default: True
      -
      2277    def token_sql(self, token_type: TokenType) -> str:
      -2278        return self.TOKEN_MAPPING.get(token_type, token_type.name)
      +            
      2298    def token_sql(self, token_type: TokenType) -> str:
      +2299        return self.TOKEN_MAPPING.get(token_type, token_type.name)
       
      @@ -11125,13 +11249,13 @@ Default: True
      -
      2280    def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str:
      -2281        this = self.sql(expression, "this")
      -2282        expressions = self.no_identify(self.expressions, expression)
      -2283        expressions = (
      -2284            self.wrap(expressions) if expression.args.get("wrapped") else f" {expressions}"
      -2285        )
      -2286        return f"{this}{expressions}"
      +            
      2301    def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str:
      +2302        this = self.sql(expression, "this")
      +2303        expressions = self.no_identify(self.expressions, expression)
      +2304        expressions = (
      +2305            self.wrap(expressions) if expression.args.get("wrapped") else f" {expressions}"
      +2306        )
      +2307        return f"{this}{expressions}"
       
      @@ -11149,10 +11273,10 @@ Default: True
      -
      2288    def joinhint_sql(self, expression: exp.JoinHint) -> str:
      -2289        this = self.sql(expression, "this")
      -2290        expressions = self.expressions(expression, flat=True)
      -2291        return f"{this}({expressions})"
      +            
      2309    def joinhint_sql(self, expression: exp.JoinHint) -> str:
      +2310        this = self.sql(expression, "this")
      +2311        expressions = self.expressions(expression, flat=True)
      +2312        return f"{this}({expressions})"
       
      @@ -11170,8 +11294,8 @@ Default: True
      -
      2293    def kwarg_sql(self, expression: exp.Kwarg) -> str:
      -2294        return self.binary(expression, "=>")
      +            
      2314    def kwarg_sql(self, expression: exp.Kwarg) -> str:
      +2315        return self.binary(expression, "=>")
       
      @@ -11189,25 +11313,25 @@ Default: True
      -
      2296    def when_sql(self, expression: exp.When) -> str:
      -2297        matched = "MATCHED" if expression.args["matched"] else "NOT MATCHED"
      -2298        source = " BY SOURCE" if self.MATCHED_BY_SOURCE and expression.args.get("source") else ""
      -2299        condition = self.sql(expression, "condition")
      -2300        condition = f" AND {condition}" if condition else ""
      -2301
      -2302        then_expression = expression.args.get("then")
      -2303        if isinstance(then_expression, exp.Insert):
      -2304            then = f"INSERT {self.sql(then_expression, 'this')}"
      -2305            if "expression" in then_expression.args:
      -2306                then += f" VALUES {self.sql(then_expression, 'expression')}"
      -2307        elif isinstance(then_expression, exp.Update):
      -2308            if isinstance(then_expression.args.get("expressions"), exp.Star):
      -2309                then = f"UPDATE {self.sql(then_expression, 'expressions')}"
      -2310            else:
      -2311                then = f"UPDATE SET {self.expressions(then_expression, flat=True)}"
      -2312        else:
      -2313            then = self.sql(then_expression)
      -2314        return f"WHEN {matched}{source}{condition} THEN {then}"
      +            
      2317    def when_sql(self, expression: exp.When) -> str:
      +2318        matched = "MATCHED" if expression.args["matched"] else "NOT MATCHED"
      +2319        source = " BY SOURCE" if self.MATCHED_BY_SOURCE and expression.args.get("source") else ""
      +2320        condition = self.sql(expression, "condition")
      +2321        condition = f" AND {condition}" if condition else ""
      +2322
      +2323        then_expression = expression.args.get("then")
      +2324        if isinstance(then_expression, exp.Insert):
      +2325            then = f"INSERT {self.sql(then_expression, 'this')}"
      +2326            if "expression" in then_expression.args:
      +2327                then += f" VALUES {self.sql(then_expression, 'expression')}"
      +2328        elif isinstance(then_expression, exp.Update):
      +2329            if isinstance(then_expression.args.get("expressions"), exp.Star):
      +2330                then = f"UPDATE {self.sql(then_expression, 'expressions')}"
      +2331            else:
      +2332                then = f"UPDATE SET {self.expressions(then_expression, flat=True)}"
      +2333        else:
      +2334            then = self.sql(then_expression)
      +2335        return f"WHEN {matched}{source}{condition} THEN {then}"
       
      @@ -11225,11 +11349,11 @@ Default: True
      -
      2316    def merge_sql(self, expression: exp.Merge) -> str:
      -2317        this = self.sql(expression, "this")
      -2318        using = f"USING {self.sql(expression, 'using')}"
      -2319        on = f"ON {self.sql(expression, 'on')}"
      -2320        return f"MERGE INTO {this} {using} {on} {self.expressions(expression, sep=' ')}"
      +            
      2337    def merge_sql(self, expression: exp.Merge) -> str:
      +2338        this = self.sql(expression, "this")
      +2339        using = f"USING {self.sql(expression, 'using')}"
      +2340        on = f"ON {self.sql(expression, 'on')}"
      +2341        return f"MERGE INTO {this} {using} {on} {self.expressions(expression, sep=' ')}"
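when_sql() and merge_sql() together cover the full MERGE statement; a round-trip sketch with hypothetical table names, assuming the default dialect parses MERGE:

    import sqlglot

    sql = """
    MERGE INTO t USING s ON t.id = s.id
    WHEN MATCHED THEN UPDATE SET t.v = s.v
    WHEN NOT MATCHED THEN INSERT (id, v) VALUES (s.id, s.v)
    """
    print(sqlglot.transpile(sql)[0])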
       
      @@ -11247,11 +11371,11 @@ Default: True
      -
      2322    def tochar_sql(self, expression: exp.ToChar) -> str:
      -2323        if expression.args.get("format"):
      -2324            self.unsupported("Format argument unsupported for TO_CHAR/TO_VARCHAR function")
      -2325
      -2326        return self.sql(exp.cast(expression.this, "text"))
      +            
      2343    def tochar_sql(self, expression: exp.ToChar) -> str:
      +2344        if expression.args.get("format"):
      +2345            self.unsupported("Format argument unsupported for TO_CHAR/TO_VARCHAR function")
      +2346
      +2347        return self.sql(exp.cast(expression.this, "text"))
       
      @@ -11269,12 +11393,12 @@ Default: True
      -
      2328    def dictproperty_sql(self, expression: exp.DictProperty) -> str:
      -2329        this = self.sql(expression, "this")
      -2330        kind = self.sql(expression, "kind")
      -2331        settings_sql = self.expressions(expression, key="settings", sep=" ")
      -2332        args = f"({self.sep('')}{settings_sql}{self.seg(')', sep='')}" if settings_sql else "()"
      -2333        return f"{this}({kind}{args})"
      +            
      2349    def dictproperty_sql(self, expression: exp.DictProperty) -> str:
      +2350        this = self.sql(expression, "this")
      +2351        kind = self.sql(expression, "kind")
      +2352        settings_sql = self.expressions(expression, key="settings", sep=" ")
      +2353        args = f"({self.sep('')}{settings_sql}{self.seg(')', sep='')}" if settings_sql else "()"
      +2354        return f"{this}({kind}{args})"
       
      @@ -11292,11 +11416,11 @@ Default: True
      -
      2335    def dictrange_sql(self, expression: exp.DictRange) -> str:
      -2336        this = self.sql(expression, "this")
      -2337        max = self.sql(expression, "max")
      -2338        min = self.sql(expression, "min")
      -2339        return f"{this}(MIN {min} MAX {max})"
      +            
      2356    def dictrange_sql(self, expression: exp.DictRange) -> str:
      +2357        this = self.sql(expression, "this")
      +2358        max = self.sql(expression, "max")
      +2359        min = self.sql(expression, "min")
      +2360        return f"{this}(MIN {min} MAX {max})"
       
      @@ -11314,8 +11438,27 @@ Default: True
      -
      2341    def dictsubproperty_sql(self, expression: exp.DictSubProperty) -> str:
      -2342        return f"{self.sql(expression, 'this')} {self.sql(expression, 'value')}"
      +            
      2362    def dictsubproperty_sql(self, expression: exp.DictSubProperty) -> str:
      +2363        return f"{self.sql(expression, 'this')} {self.sql(expression, 'value')}"
      +
+            
      2365    def oncluster_sql(self, expression: exp.OnCluster) -> str:
      +2366        return ""
       
      @@ -11334,13 +11477,13 @@ Default: True
      -
      2345def cached_generator(
      -2346    cache: t.Optional[t.Dict[int, str]] = None
      -2347) -> t.Callable[[exp.Expression], str]:
      -2348    """Returns a cached generator."""
      -2349    cache = {} if cache is None else cache
      -2350    generator = Generator(normalize=True, identify="safe")
      -2351    return lambda e: generator.generate(e, cache)
      +            
      2369def cached_generator(
      +2370    cache: t.Optional[t.Dict[int, str]] = None
      +2371) -> t.Callable[[exp.Expression], str]:
      +2372    """Returns a cached generator."""
      +2373    cache = {} if cache is None else cache
      +2374    generator = Generator(normalize=True, identify="safe")
      +2375    return lambda e: generator.generate(e, cache)
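cached_generator() wires a normalizing Generator to a shared generation cache; a brief usage sketch:

    import sqlglot
    from sqlglot.generator import cached_generator

    generate = cached_generator()
    expr = sqlglot.parse_one("SELECT a FROM t")
    print(generate(expr))  # repeated calls on the same tree reuse the shared cache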
       
diff --git a/docs/sqlglot/optimizer/annotate_types.html b/docs/sqlglot/optimizer/annotate_types.html
index 786c144..412335d 100644
--- a/docs/sqlglot/optimizer/annotate_types.html
+++ b/docs/sqlglot/optimizer/annotate_types.html
@@ -68,406 +68,396 @@
-
        1from sqlglot import exp
      -  2from sqlglot.helper import ensure_list, subclasses
      -  3from sqlglot.optimizer.scope import Scope, traverse_scope
      -  4from sqlglot.schema import ensure_schema
      -  5
      -  6
      -  7def annotate_types(expression, schema=None, annotators=None, coerces_to=None):
      -  8    """
      -  9    Recursively infer & annotate types in an expression syntax tree against a schema.
      - 10    Assumes that we've already executed the optimizer's qualify_columns step.
      - 11
      - 12    Example:
      - 13        >>> import sqlglot
      - 14        >>> schema = {"y": {"cola": "SMALLINT"}}
      - 15        >>> sql = "SELECT x.cola + 2.5 AS cola FROM (SELECT y.cola AS cola FROM y AS y) AS x"
      - 16        >>> annotated_expr = annotate_types(sqlglot.parse_one(sql), schema=schema)
      - 17        >>> annotated_expr.expressions[0].type.this  # Get the type of "x.cola + 2.5 AS cola"
      - 18        <Type.DOUBLE: 'DOUBLE'>
      - 19
      - 20    Args:
      - 21        expression (sqlglot.Expression): Expression to annotate.
      - 22        schema (dict|sqlglot.optimizer.Schema): Database schema.
      - 23        annotators (dict): Maps expression type to corresponding annotation function.
      - 24        coerces_to (dict): Maps expression type to set of types that it can be coerced into.
      - 25    Returns:
      - 26        sqlglot.Expression: expression annotated with types
      - 27    """
      - 28
      - 29    schema = ensure_schema(schema)
      - 30
      - 31    return TypeAnnotator(schema, annotators, coerces_to).annotate(expression)
      - 32
      - 33
      - 34class TypeAnnotator:
      - 35    ANNOTATORS = {
      - 36        **{
      - 37            expr_type: lambda self, expr: self._annotate_unary(expr)
      - 38            for expr_type in subclasses(exp.__name__, exp.Unary)
      - 39        },
      - 40        **{
      - 41            expr_type: lambda self, expr: self._annotate_binary(expr)
      - 42            for expr_type in subclasses(exp.__name__, exp.Binary)
      - 43        },
      - 44        exp.Cast: lambda self, expr: self._annotate_with_type(expr, expr.args["to"]),
      - 45        exp.TryCast: lambda self, expr: self._annotate_with_type(expr, expr.args["to"]),
      - 46        exp.DataType: lambda self, expr: self._annotate_with_type(expr, expr.copy()),
      - 47        exp.Alias: lambda self, expr: self._annotate_unary(expr),
      - 48        exp.Between: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
      - 49        exp.In: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
      - 50        exp.Literal: lambda self, expr: self._annotate_literal(expr),
      - 51        exp.Boolean: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
      - 52        exp.Null: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.NULL),
      - 53        exp.Anonymous: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.UNKNOWN),
      - 54        exp.ApproxDistinct: lambda self, expr: self._annotate_with_type(
      - 55            expr, exp.DataType.Type.BIGINT
      - 56        ),
      - 57        exp.Avg: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      - 58        exp.Min: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"),
      - 59        exp.Max: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"),
      - 60        exp.Sum: lambda self, expr: self._annotate_by_args(
      - 61            expr, "this", "expressions", promote=True
      - 62        ),
      - 63        exp.Ceil: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 64        exp.Count: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT),
      - 65        exp.CurrentDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      - 66        exp.CurrentDatetime: lambda self, expr: self._annotate_with_type(
      - 67            expr, exp.DataType.Type.DATETIME
      - 68        ),
      - 69        exp.CurrentTime: lambda self, expr: self._annotate_with_type(
      - 70            expr, exp.DataType.Type.TIMESTAMP
      - 71        ),
      - 72        exp.CurrentTimestamp: lambda self, expr: self._annotate_with_type(
      - 73            expr, exp.DataType.Type.TIMESTAMP
      - 74        ),
      - 75        exp.DateAdd: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      - 76        exp.DateSub: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      - 77        exp.DateDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 78        exp.DatetimeAdd: lambda self, expr: self._annotate_with_type(
      - 79            expr, exp.DataType.Type.DATETIME
      - 80        ),
      - 81        exp.DatetimeSub: lambda self, expr: self._annotate_with_type(
      - 82            expr, exp.DataType.Type.DATETIME
      - 83        ),
      - 84        exp.DatetimeDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 85        exp.Extract: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 86        exp.TimestampAdd: lambda self, expr: self._annotate_with_type(
      - 87            expr, exp.DataType.Type.TIMESTAMP
      - 88        ),
      - 89        exp.TimestampSub: lambda self, expr: self._annotate_with_type(
      - 90            expr, exp.DataType.Type.TIMESTAMP
      - 91        ),
      - 92        exp.TimestampDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 93        exp.TimeAdd: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TIMESTAMP),
      - 94        exp.TimeSub: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TIMESTAMP),
      - 95        exp.TimeDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 96        exp.DateStrToDate: lambda self, expr: self._annotate_with_type(
      - 97            expr, exp.DataType.Type.DATE
      - 98        ),
      - 99        exp.DateToDateStr: lambda self, expr: self._annotate_with_type(
      -100            expr, exp.DataType.Type.VARCHAR
      -101        ),
      -102        exp.DateToDi: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      -103        exp.Day: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
      -104        exp.DiToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      -105        exp.Exp: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -106        exp.Floor: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      -107        exp.Case: lambda self, expr: self._annotate_by_args(expr, "default", "ifs"),
      -108        exp.If: lambda self, expr: self._annotate_by_args(expr, "true", "false"),
      -109        exp.Coalesce: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"),
      -110        exp.IfNull: lambda self, expr: self._annotate_by_args(expr, "this", "expression"),
      -111        exp.Concat: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -112        exp.ConcatWs: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -113        exp.GroupConcat: lambda self, expr: self._annotate_with_type(
      -114            expr, exp.DataType.Type.VARCHAR
      -115        ),
      -116        exp.ArrayConcat: lambda self, expr: self._annotate_with_type(
      -117            expr, exp.DataType.Type.VARCHAR
      -118        ),
      -119        exp.ArraySize: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT),
      -120        exp.Map: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.MAP),
      -121        exp.VarMap: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.MAP),
      -122        exp.Initcap: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -123        exp.Interval: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INTERVAL),
      -124        exp.Least: lambda self, expr: self._annotate_by_args(expr, "expressions"),
      -125        exp.Length: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT),
      -126        exp.Levenshtein: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      -127        exp.Ln: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -128        exp.Log: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -129        exp.Log2: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -130        exp.Log10: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -131        exp.Lower: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -132        exp.Month: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
      -133        exp.Pow: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -134        exp.Quantile: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -135        exp.ApproxQuantile: lambda self, expr: self._annotate_with_type(
      -136            expr, exp.DataType.Type.DOUBLE
      -137        ),
      -138        exp.RegexpLike: lambda self, expr: self._annotate_with_type(
      -139            expr, exp.DataType.Type.BOOLEAN
      -140        ),
      -141        exp.Round: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -142        exp.SafeDivide: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -143        exp.Substring: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -144        exp.StrPosition: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      -145        exp.StrToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      -146        exp.StrToTime: lambda self, expr: self._annotate_with_type(
      -147            expr, exp.DataType.Type.TIMESTAMP
      -148        ),
      -149        exp.Sqrt: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -150        exp.Stddev: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -151        exp.StddevPop: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -152        exp.StddevSamp: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -153        exp.TimeToStr: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -154        exp.TimeToTimeStr: lambda self, expr: self._annotate_with_type(
      -155            expr, exp.DataType.Type.VARCHAR
      -156        ),
      -157        exp.TimeStrToDate: lambda self, expr: self._annotate_with_type(
      -158            expr, exp.DataType.Type.DATE
      -159        ),
      -160        exp.TimeStrToTime: lambda self, expr: self._annotate_with_type(
      -161            expr, exp.DataType.Type.TIMESTAMP
      -162        ),
      -163        exp.Trim: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -164        exp.TsOrDsToDateStr: lambda self, expr: self._annotate_with_type(
      -165            expr, exp.DataType.Type.VARCHAR
      -166        ),
      -167        exp.TsOrDsToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      -168        exp.TsOrDiToDi: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      -169        exp.UnixToStr: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -170        exp.UnixToTime: lambda self, expr: self._annotate_with_type(
      -171            expr, exp.DataType.Type.TIMESTAMP
      -172        ),
      -173        exp.UnixToTimeStr: lambda self, expr: self._annotate_with_type(
      -174            expr, exp.DataType.Type.VARCHAR
      -175        ),
      -176        exp.Upper: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -177        exp.Variance: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -178        exp.VariancePop: lambda self, expr: self._annotate_with_type(
      -179            expr, exp.DataType.Type.DOUBLE
      -180        ),
      -181        exp.Week: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
      -182        exp.Year: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
      -183    }
      -184
      -185    # Reference: https://spark.apache.org/docs/3.2.0/sql-ref-ansi-compliance.html
      -186    COERCES_TO = {
      -187        # CHAR < NCHAR < VARCHAR < NVARCHAR < TEXT
      -188        exp.DataType.Type.TEXT: set(),
      -189        exp.DataType.Type.NVARCHAR: {exp.DataType.Type.TEXT},
      -190        exp.DataType.Type.VARCHAR: {exp.DataType.Type.NVARCHAR, exp.DataType.Type.TEXT},
      -191        exp.DataType.Type.NCHAR: {
      -192            exp.DataType.Type.VARCHAR,
      -193            exp.DataType.Type.NVARCHAR,
      -194            exp.DataType.Type.TEXT,
      -195        },
      -196        exp.DataType.Type.CHAR: {
      -197            exp.DataType.Type.NCHAR,
      -198            exp.DataType.Type.VARCHAR,
      -199            exp.DataType.Type.NVARCHAR,
      -200            exp.DataType.Type.TEXT,
      -201        },
      -202        # TINYINT < SMALLINT < INT < BIGINT < DECIMAL < FLOAT < DOUBLE
      -203        exp.DataType.Type.DOUBLE: set(),
      -204        exp.DataType.Type.FLOAT: {exp.DataType.Type.DOUBLE},
      -205        exp.DataType.Type.DECIMAL: {exp.DataType.Type.FLOAT, exp.DataType.Type.DOUBLE},
      -206        exp.DataType.Type.BIGINT: {
      -207            exp.DataType.Type.DECIMAL,
      -208            exp.DataType.Type.FLOAT,
      -209            exp.DataType.Type.DOUBLE,
      -210        },
      -211        exp.DataType.Type.INT: {
      -212            exp.DataType.Type.BIGINT,
      -213            exp.DataType.Type.DECIMAL,
      -214            exp.DataType.Type.FLOAT,
      -215            exp.DataType.Type.DOUBLE,
      -216        },
      -217        exp.DataType.Type.SMALLINT: {
      -218            exp.DataType.Type.INT,
      -219            exp.DataType.Type.BIGINT,
      -220            exp.DataType.Type.DECIMAL,
      -221            exp.DataType.Type.FLOAT,
      -222            exp.DataType.Type.DOUBLE,
      -223        },
      -224        exp.DataType.Type.TINYINT: {
      -225            exp.DataType.Type.SMALLINT,
      -226            exp.DataType.Type.INT,
      -227            exp.DataType.Type.BIGINT,
      -228            exp.DataType.Type.DECIMAL,
      -229            exp.DataType.Type.FLOAT,
      -230            exp.DataType.Type.DOUBLE,
      -231        },
      -232        # DATE < DATETIME < TIMESTAMP < TIMESTAMPTZ < TIMESTAMPLTZ
      -233        exp.DataType.Type.TIMESTAMPLTZ: set(),
      -234        exp.DataType.Type.TIMESTAMPTZ: {exp.DataType.Type.TIMESTAMPLTZ},
      -235        exp.DataType.Type.TIMESTAMP: {
      -236            exp.DataType.Type.TIMESTAMPTZ,
      -237            exp.DataType.Type.TIMESTAMPLTZ,
      -238        },
      -239        exp.DataType.Type.DATETIME: {
      -240            exp.DataType.Type.TIMESTAMP,
      -241            exp.DataType.Type.TIMESTAMPTZ,
      -242            exp.DataType.Type.TIMESTAMPLTZ,
      -243        },
      -244        exp.DataType.Type.DATE: {
      -245            exp.DataType.Type.DATETIME,
      -246            exp.DataType.Type.TIMESTAMP,
      -247            exp.DataType.Type.TIMESTAMPTZ,
      -248            exp.DataType.Type.TIMESTAMPLTZ,
      -249        },
      -250    }
      -251
      -252    TRAVERSABLES = (exp.Select, exp.Union, exp.UDTF, exp.Subquery)
      -253
      -254    def __init__(self, schema=None, annotators=None, coerces_to=None):
      -255        self.schema = schema
      -256        self.annotators = annotators or self.ANNOTATORS
      -257        self.coerces_to = coerces_to or self.COERCES_TO
      -258
      -259    def annotate(self, expression):
      -260        if isinstance(expression, self.TRAVERSABLES):
      -261            for scope in traverse_scope(expression):
      -262                selects = {}
      -263                for name, source in scope.sources.items():
      -264                    if not isinstance(source, Scope):
      -265                        continue
      -266                    if isinstance(source.expression, exp.UDTF):
      -267                        values = []
      -268
      -269                        if isinstance(source.expression, exp.Lateral):
      -270                            if isinstance(source.expression.this, exp.Explode):
      -271                                values = [source.expression.this.this]
      -272                        else:
      -273                            values = source.expression.expressions[0].expressions
      -274
      -275                        if not values:
      -276                            continue
      -277
      -278                        selects[name] = {
      -279                            alias: column
      -280                            for alias, column in zip(
      -281                                source.expression.alias_column_names,
      -282                                values,
      -283                            )
      -284                        }
      -285                    else:
      -286                        selects[name] = {
      -287                            select.alias_or_name: select for select in source.expression.selects
      -288                        }
      -289                # First annotate the current scope's column references
      -290                for col in scope.columns:
      -291                    if not col.table:
      -292                        continue
      -293
      -294                    source = scope.sources.get(col.table)
      -295                    if isinstance(source, exp.Table):
      -296                        col.type = self.schema.get_column_type(source, col)
      -297                    elif source and col.table in selects and col.name in selects[col.table]:
      -298                        col.type = selects[col.table][col.name].type
      -299                # Then (possibly) annotate the remaining expressions in the scope
      -300                self._maybe_annotate(scope.expression)
      -301        return self._maybe_annotate(expression)  # This takes care of non-traversable expressions
      -302
      -303    def _maybe_annotate(self, expression):
      -304        if expression.type:
      -305            return expression  # We've already inferred the expression's type
      -306
      -307        annotator = self.annotators.get(expression.__class__)
      -308
      -309        return (
      -310            annotator(self, expression)
      -311            if annotator
      -312            else self._annotate_with_type(expression, exp.DataType.Type.UNKNOWN)
      -313        )
      +                        
        1from __future__ import annotations
      +  2
      +  3import typing as t
      +  4
      +  5from sqlglot import exp
      +  6from sqlglot._typing import E
      +  7from sqlglot.helper import ensure_list, subclasses
      +  8from sqlglot.optimizer.scope import Scope, traverse_scope
      +  9from sqlglot.schema import Schema, ensure_schema
      + 10
      + 11if t.TYPE_CHECKING:
      + 12    B = t.TypeVar("B", bound=exp.Binary)
      + 13
      + 14
      + 15def annotate_types(
      + 16    expression: E,
      + 17    schema: t.Optional[t.Dict | Schema] = None,
      + 18    annotators: t.Optional[t.Dict[t.Type[E], t.Callable[[TypeAnnotator, E], E]]] = None,
      + 19    coerces_to: t.Optional[t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]]] = None,
      + 20) -> E:
      + 21    """
      + 22    Infers the types of an expression, annotating its AST accordingly.
      + 23
      + 24    Example:
      + 25        >>> import sqlglot
      + 26        >>> schema = {"y": {"cola": "SMALLINT"}}
      + 27        >>> sql = "SELECT x.cola + 2.5 AS cola FROM (SELECT y.cola AS cola FROM y AS y) AS x"
      + 28        >>> annotated_expr = annotate_types(sqlglot.parse_one(sql), schema=schema)
      + 29        >>> annotated_expr.expressions[0].type.this  # Get the type of "x.cola + 2.5 AS cola"
      + 30        <Type.DOUBLE: 'DOUBLE'>
      + 31
      + 32    Args:
      + 33        expression: Expression to annotate.
      + 34        schema: Database schema.
      + 35        annotators: Maps expression type to corresponding annotation function.
      + 36        coerces_to: Maps expression type to set of types that it can be coerced into.
      + 37
      + 38    Returns:
      + 39        The expression annotated with types.
      + 40    """
      + 41
      + 42    schema = ensure_schema(schema)
      + 43
      + 44    return TypeAnnotator(schema, annotators, coerces_to).annotate(expression)
      + 45
      + 46
      + 47def _annotate_with_type_lambda(data_type: exp.DataType.Type) -> t.Callable[[TypeAnnotator, E], E]:
      + 48    return lambda self, e: self._annotate_with_type(e, data_type)
      + 49
      + 50
      + 51class _TypeAnnotator(type):
      + 52    def __new__(cls, clsname, bases, attrs):
      + 53        klass = super().__new__(cls, clsname, bases, attrs)
      + 54
      + 55        # Highest-to-lowest type precedence, as specified in Spark's docs (ANSI):
      + 56        # https://spark.apache.org/docs/3.2.0/sql-ref-ansi-compliance.html
      + 57        text_precedence = (
      + 58            exp.DataType.Type.TEXT,
      + 59            exp.DataType.Type.NVARCHAR,
      + 60            exp.DataType.Type.VARCHAR,
      + 61            exp.DataType.Type.NCHAR,
      + 62            exp.DataType.Type.CHAR,
      + 63        )
      + 64        numeric_precedence = (
      + 65            exp.DataType.Type.DOUBLE,
      + 66            exp.DataType.Type.FLOAT,
      + 67            exp.DataType.Type.DECIMAL,
      + 68            exp.DataType.Type.BIGINT,
      + 69            exp.DataType.Type.INT,
      + 70            exp.DataType.Type.SMALLINT,
      + 71            exp.DataType.Type.TINYINT,
      + 72        )
      + 73        timelike_precedence = (
      + 74            exp.DataType.Type.TIMESTAMPLTZ,
      + 75            exp.DataType.Type.TIMESTAMPTZ,
      + 76            exp.DataType.Type.TIMESTAMP,
      + 77            exp.DataType.Type.DATETIME,
      + 78            exp.DataType.Type.DATE,
      + 79        )
      + 80
      + 81        for type_precedence in (text_precedence, numeric_precedence, timelike_precedence):
      + 82            coerces_to = set()
      + 83            for data_type in type_precedence:
      + 84                klass.COERCES_TO[data_type] = coerces_to.copy()
      + 85                coerces_to |= {data_type}
      + 86
      + 87        return klass
      + 88
      + 89
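The _TypeAnnotator metaclass above autofills COERCES_TO from the three precedence chains; a minimal check of the result, assuming the 16.x import path:

    from sqlglot import exp
    from sqlglot.optimizer.annotate_types import TypeAnnotator

    T = exp.DataType.Type
    assert T.DOUBLE in TypeAnnotator.COERCES_TO[T.TINYINT]  # lowest numeric type coerces upwards
    assert TypeAnnotator.COERCES_TO[T.TEXT] == set()        # highest text type coerces to nothing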
      + 90class TypeAnnotator(metaclass=_TypeAnnotator):
      + 91    TYPE_TO_EXPRESSIONS: t.Dict[exp.DataType.Type, t.Set[t.Type[exp.Expression]]] = {
      + 92        exp.DataType.Type.BIGINT: {
      + 93            exp.ApproxDistinct,
      + 94            exp.ArraySize,
      + 95            exp.Count,
      + 96            exp.Length,
      + 97        },
      + 98        exp.DataType.Type.BOOLEAN: {
      + 99            exp.Between,
      +100            exp.Boolean,
      +101            exp.In,
      +102            exp.RegexpLike,
      +103        },
      +104        exp.DataType.Type.DATE: {
      +105            exp.CurrentDate,
      +106            exp.Date,
      +107            exp.DateAdd,
      +108            exp.DateStrToDate,
      +109            exp.DateSub,
      +110            exp.DateTrunc,
      +111            exp.DiToDate,
      +112            exp.StrToDate,
      +113            exp.TimeStrToDate,
      +114            exp.TsOrDsToDate,
      +115        },
      +116        exp.DataType.Type.DATETIME: {
      +117            exp.CurrentDatetime,
      +118            exp.DatetimeAdd,
      +119            exp.DatetimeSub,
      +120        },
      +121        exp.DataType.Type.DOUBLE: {
      +122            exp.ApproxQuantile,
      +123            exp.Avg,
      +124            exp.Exp,
      +125            exp.Ln,
      +126            exp.Log,
      +127            exp.Log2,
      +128            exp.Log10,
      +129            exp.Pow,
      +130            exp.Quantile,
      +131            exp.Round,
      +132            exp.SafeDivide,
      +133            exp.Sqrt,
      +134            exp.Stddev,
      +135            exp.StddevPop,
      +136            exp.StddevSamp,
      +137            exp.Variance,
      +138            exp.VariancePop,
      +139        },
      +140        exp.DataType.Type.INT: {
      +141            exp.Ceil,
      +142            exp.DateDiff,
      +143            exp.DatetimeDiff,
      +144            exp.Extract,
      +145            exp.TimestampDiff,
      +146            exp.TimeDiff,
      +147            exp.DateToDi,
      +148            exp.Floor,
      +149            exp.Levenshtein,
      +150            exp.StrPosition,
      +151            exp.TsOrDiToDi,
      +152        },
      +153        exp.DataType.Type.TIMESTAMP: {
      +154            exp.CurrentTime,
      +155            exp.CurrentTimestamp,
      +156            exp.StrToTime,
      +157            exp.TimeAdd,
      +158            exp.TimeStrToTime,
      +159            exp.TimeSub,
      +160            exp.TimestampAdd,
      +161            exp.TimestampSub,
      +162            exp.UnixToTime,
      +163        },
      +164        exp.DataType.Type.TINYINT: {
      +165            exp.Day,
      +166            exp.Month,
      +167            exp.Week,
      +168            exp.Year,
      +169        },
      +170        exp.DataType.Type.VARCHAR: {
      +171            exp.ArrayConcat,
      +172            exp.Concat,
      +173            exp.ConcatWs,
      +174            exp.DateToDateStr,
      +175            exp.GroupConcat,
      +176            exp.Initcap,
      +177            exp.Lower,
      +178            exp.SafeConcat,
      +179            exp.Substring,
      +180            exp.TimeToStr,
      +181            exp.TimeToTimeStr,
      +182            exp.Trim,
      +183            exp.TsOrDsToDateStr,
      +184            exp.UnixToStr,
      +185            exp.UnixToTimeStr,
      +186            exp.Upper,
      +187        },
      +188    }
      +189
      +190    ANNOTATORS = {
      +191        **{
      +192            expr_type: lambda self, e: self._annotate_unary(e)
      +193            for expr_type in subclasses(exp.__name__, (exp.Unary, exp.Alias))
      +194        },
      +195        **{
      +196            expr_type: lambda self, e: self._annotate_binary(e)
      +197            for expr_type in subclasses(exp.__name__, exp.Binary)
      +198        },
      +199        **{
      +200            expr_type: _annotate_with_type_lambda(data_type)
      +201            for data_type, expressions in TYPE_TO_EXPRESSIONS.items()
      +202            for expr_type in expressions
      +203        },
      +204        exp.Anonymous: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.UNKNOWN),
      +205        exp.Cast: lambda self, e: self._annotate_with_type(e, e.args["to"]),
      +206        exp.Case: lambda self, e: self._annotate_by_args(e, "default", "ifs"),
      +207        exp.Coalesce: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
      +208        exp.DataType: lambda self, e: self._annotate_with_type(e, e.copy()),
      +209        exp.If: lambda self, e: self._annotate_by_args(e, "true", "false"),
      +210        exp.Interval: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.INTERVAL),
      +211        exp.Least: lambda self, e: self._annotate_by_args(e, "expressions"),
      +212        exp.Literal: lambda self, e: self._annotate_literal(e),
      +213        exp.Map: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.MAP),
      +214        exp.Max: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
      +215        exp.Min: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
      +216        exp.Null: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.NULL),
      +217        exp.Sum: lambda self, e: self._annotate_by_args(e, "this", "expressions", promote=True),
      +218        exp.TryCast: lambda self, e: self._annotate_with_type(e, e.args["to"]),
      +219        exp.VarMap: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.MAP),
      +220    }
      +221
      +222    # Specifies what types a given type can be coerced into (autofilled)
      +223    COERCES_TO: t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]] = {}
      +224
      +225    def __init__(
      +226        self,
      +227        schema: Schema,
      +228        annotators: t.Optional[t.Dict[t.Type[E], t.Callable[[TypeAnnotator, E], E]]] = None,
      +229        coerces_to: t.Optional[t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]]] = None,
      +230    ) -> None:
      +231        self.schema = schema
      +232        self.annotators = annotators or self.ANNOTATORS
      +233        self.coerces_to = coerces_to or self.COERCES_TO
      +234
      +235    def annotate(self, expression: E) -> E:
      +236        for scope in traverse_scope(expression):
      +237            selects = {}
      +238            for name, source in scope.sources.items():
      +239                if not isinstance(source, Scope):
      +240                    continue
      +241                if isinstance(source.expression, exp.UDTF):
      +242                    values = []
      +243
      +244                    if isinstance(source.expression, exp.Lateral):
      +245                        if isinstance(source.expression.this, exp.Explode):
      +246                            values = [source.expression.this.this]
      +247                    else:
      +248                        values = source.expression.expressions[0].expressions
      +249
      +250                    if not values:
      +251                        continue
      +252
      +253                    selects[name] = {
      +254                        alias: column
      +255                        for alias, column in zip(
      +256                            source.expression.alias_column_names,
      +257                            values,
      +258                        )
      +259                    }
      +260                else:
      +261                    selects[name] = {
      +262                        select.alias_or_name: select for select in source.expression.selects
      +263                    }
      +264
      +265            # First annotate the current scope's column references
      +266            for col in scope.columns:
      +267                if not col.table:
      +268                    continue
      +269
      +270                source = scope.sources.get(col.table)
      +271                if isinstance(source, exp.Table):
      +272                    col.type = self.schema.get_column_type(source, col)
      +273                elif source and col.table in selects and col.name in selects[col.table]:
      +274                    col.type = selects[col.table][col.name].type
      +275
      +276            # Then (possibly) annotate the remaining expressions in the scope
      +277            self._maybe_annotate(scope.expression)
      +278
      +279        return self._maybe_annotate(expression)  # This takes care of non-traversable expressions
      +280
      +281    def _maybe_annotate(self, expression: E) -> E:
      +282        if expression.type:
      +283            return expression  # We've already inferred the expression's type
      +284
      +285        annotator = self.annotators.get(expression.__class__)
      +286
      +287        return (
      +288            annotator(self, expression)
      +289            if annotator
      +290            else self._annotate_with_type(expression, exp.DataType.Type.UNKNOWN)
      +291        )
      +292
      +293    def _annotate_args(self, expression: E) -> E:
      +294        for _, value in expression.iter_expressions():
      +295            self._maybe_annotate(value)
      +296
      +297        return expression
      +298
      +299    def _maybe_coerce(
      +300        self, type1: exp.DataType | exp.DataType.Type, type2: exp.DataType | exp.DataType.Type
      +301    ) -> exp.DataType.Type:
      +302        # We propagate the NULL / UNKNOWN types upwards if found
      +303        if isinstance(type1, exp.DataType):
      +304            type1 = type1.this
      +305        if isinstance(type2, exp.DataType):
      +306            type2 = type2.this
      +307
      +308        if exp.DataType.Type.NULL in (type1, type2):
      +309            return exp.DataType.Type.NULL
      +310        if exp.DataType.Type.UNKNOWN in (type1, type2):
      +311            return exp.DataType.Type.UNKNOWN
      +312
      +313        return type2 if type2 in self.coerces_to.get(type1, {}) else type1  # type: ignore
       314
      -315    def _annotate_args(self, expression):
      -316        for _, value in expression.iter_expressions():
      -317            self._maybe_annotate(value)
      +315    # Note: the following "no_type_check" decorators were added because mypy was yelling due
      +316    # to assigning Type values to expression.type (since its getter returns Optional[DataType]).
      +317    # This is a known mypy issue: https://github.com/python/mypy/issues/3004
       318
      -319        return expression
      -320
      -321    def _maybe_coerce(self, type1, type2):
      -322        # We propagate the NULL / UNKNOWN types upwards if found
      -323        if isinstance(type1, exp.DataType):
      -324            type1 = type1.this
      -325        if isinstance(type2, exp.DataType):
      -326            type2 = type2.this
      -327
      -328        if exp.DataType.Type.NULL in (type1, type2):
      -329            return exp.DataType.Type.NULL
      -330        if exp.DataType.Type.UNKNOWN in (type1, type2):
      -331            return exp.DataType.Type.UNKNOWN
      -332
      -333        return type2 if type2 in self.coerces_to.get(type1, {}) else type1
      -334
      -335    def _annotate_binary(self, expression):
      -336        self._annotate_args(expression)
      -337
      -338        left_type = expression.left.type.this
      -339        right_type = expression.right.type.this
      -340
      -341        if isinstance(expression, exp.Connector):
      -342            if left_type == exp.DataType.Type.NULL and right_type == exp.DataType.Type.NULL:
      -343                expression.type = exp.DataType.Type.NULL
      -344            elif exp.DataType.Type.NULL in (left_type, right_type):
      -345                expression.type = exp.DataType.build(
      -346                    "NULLABLE", expressions=exp.DataType.build("BOOLEAN")
      -347                )
      -348            else:
      -349                expression.type = exp.DataType.Type.BOOLEAN
      -350        elif isinstance(expression, exp.Predicate):
      -351            expression.type = exp.DataType.Type.BOOLEAN
      -352        else:
      -353            expression.type = self._maybe_coerce(left_type, right_type)
      -354
      -355        return expression
      -356
      -357    def _annotate_unary(self, expression):
      -358        self._annotate_args(expression)
      -359
      -360        if isinstance(expression, exp.Condition) and not isinstance(expression, exp.Paren):
      -361            expression.type = exp.DataType.Type.BOOLEAN
      -362        else:
      -363            expression.type = expression.this.type
      -364
      -365        return expression
      -366
      -367    def _annotate_literal(self, expression):
      -368        if expression.is_string:
      -369            expression.type = exp.DataType.Type.VARCHAR
      -370        elif expression.is_int:
      -371            expression.type = exp.DataType.Type.INT
      -372        else:
      -373            expression.type = exp.DataType.Type.DOUBLE
      -374
      -375        return expression
      -376
      -377    def _annotate_with_type(self, expression, target_type):
      -378        expression.type = target_type
      -379        return self._annotate_args(expression)
      -380
      -381    def _annotate_by_args(self, expression, *args, promote=False):
      -382        self._annotate_args(expression)
      -383        expressions = []
      -384        for arg in args:
      -385            arg_expr = expression.args.get(arg)
      -386            expressions.extend(expr for expr in ensure_list(arg_expr) if expr)
      -387
      -388        last_datatype = None
      -389        for expr in expressions:
      -390            last_datatype = self._maybe_coerce(last_datatype or expr.type, expr.type)
      -391
      -392        expression.type = last_datatype or exp.DataType.Type.UNKNOWN
      -393
      -394        if promote:
      -395            if expression.type.this in exp.DataType.INTEGER_TYPES:
      -396                expression.type = exp.DataType.Type.BIGINT
      -397            elif expression.type.this in exp.DataType.FLOAT_TYPES:
      -398                expression.type = exp.DataType.Type.DOUBLE
      -399
      -400        return expression
      +319    @t.no_type_check
      +320    def _annotate_binary(self, expression: B) -> B:
      +321        self._annotate_args(expression)
      +322
      +323        left_type = expression.left.type.this
      +324        right_type = expression.right.type.this
      +325
      +326        if isinstance(expression, exp.Connector):
      +327            if left_type == exp.DataType.Type.NULL and right_type == exp.DataType.Type.NULL:
      +328                expression.type = exp.DataType.Type.NULL
      +329            elif exp.DataType.Type.NULL in (left_type, right_type):
      +330                expression.type = exp.DataType.build(
      +331                    "NULLABLE", expressions=exp.DataType.build("BOOLEAN")
      +332                )
      +333            else:
      +334                expression.type = exp.DataType.Type.BOOLEAN
      +335        elif isinstance(expression, exp.Predicate):
      +336            expression.type = exp.DataType.Type.BOOLEAN
      +337        else:
      +338            expression.type = self._maybe_coerce(left_type, right_type)
      +339
      +340        return expression
      +341
      +342    @t.no_type_check
      +343    def _annotate_unary(self, expression: E) -> E:
      +344        self._annotate_args(expression)
      +345
      +346        if isinstance(expression, exp.Condition) and not isinstance(expression, exp.Paren):
      +347            expression.type = exp.DataType.Type.BOOLEAN
      +348        else:
      +349            expression.type = expression.this.type
      +350
      +351        return expression
      +352
      +353    @t.no_type_check
      +354    def _annotate_literal(self, expression: exp.Literal) -> exp.Literal:
      +355        if expression.is_string:
      +356            expression.type = exp.DataType.Type.VARCHAR
      +357        elif expression.is_int:
      +358            expression.type = exp.DataType.Type.INT
      +359        else:
      +360            expression.type = exp.DataType.Type.DOUBLE
      +361
      +362        return expression
      +363
      +364    @t.no_type_check
      +365    def _annotate_with_type(self, expression: E, target_type: exp.DataType.Type) -> E:
      +366        expression.type = target_type
      +367        return self._annotate_args(expression)
      +368
      +369    @t.no_type_check
      +370    def _annotate_by_args(self, expression: E, *args: str, promote: bool = False) -> E:
      +371        self._annotate_args(expression)
      +372
      +373        expressions: t.List[exp.Expression] = []
      +374        for arg in args:
      +375            arg_expr = expression.args.get(arg)
      +376            expressions.extend(expr for expr in ensure_list(arg_expr) if expr)
      +377
      +378        last_datatype = None
      +379        for expr in expressions:
      +380            last_datatype = self._maybe_coerce(last_datatype or expr.type, expr.type)
      +381
      +382        expression.type = last_datatype or exp.DataType.Type.UNKNOWN
      +383
      +384        if promote:
      +385            if expression.type.this in exp.DataType.INTEGER_TYPES:
      +386                expression.type = exp.DataType.Type.BIGINT
      +387            elif expression.type.this in exp.DataType.FLOAT_TYPES:
      +388                expression.type = exp.DataType.Type.DOUBLE
      +389
      +390        return expression
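A minimal sketch of the coercion path above (`_annotate_binary` calling into `_maybe_coerce`), assuming an illustrative table `t` with one SMALLINT and one DOUBLE column; since SMALLINT coerces to DOUBLE, the sum is typed DOUBLE:

import sqlglot
from sqlglot import exp
from sqlglot.optimizer.annotate_types import annotate_types

# Illustrative schema; columns are written fully qualified, as the annotator expects.
schema = {"t": {"a": "SMALLINT", "b": "DOUBLE"}}
expr = annotate_types(sqlglot.parse_one("SELECT t.a + t.b AS c FROM t AS t"), schema=schema)

# SMALLINT coerces to DOUBLE, so the Add (and the Alias wrapping it) is typed DOUBLE.
assert expr.expressions[0].type.this == exp.DataType.Type.DOUBLE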
       
      @@ -477,42 +467,46 @@
-def annotate_types(expression, schema=None, annotators=None, coerces_to=None):
+def annotate_types( expression: ~E, schema: Union[Dict, sqlglot.schema.Schema, NoneType] = None, annotators: Optional[Dict[Type[~E], Callable[[sqlglot.optimizer.annotate_types.TypeAnnotator, ~E], ~E]]] = None, coerces_to: Optional[Dict[sqlglot.expressions.DataType.Type, Set[sqlglot.expressions.DataType.Type]]] = None) -> ~E:
      -
       8def annotate_types(expression, schema=None, annotators=None, coerces_to=None):
      - 9    """
      -10    Recursively infer & annotate types in an expression syntax tree against a schema.
      -11    Assumes that we've already executed the optimizer's qualify_columns step.
      -12
      -13    Example:
      -14        >>> import sqlglot
      -15        >>> schema = {"y": {"cola": "SMALLINT"}}
      -16        >>> sql = "SELECT x.cola + 2.5 AS cola FROM (SELECT y.cola AS cola FROM y AS y) AS x"
      -17        >>> annotated_expr = annotate_types(sqlglot.parse_one(sql), schema=schema)
      -18        >>> annotated_expr.expressions[0].type.this  # Get the type of "x.cola + 2.5 AS cola"
      -19        <Type.DOUBLE: 'DOUBLE'>
      -20
      -21    Args:
      -22        expression (sqlglot.Expression): Expression to annotate.
      -23        schema (dict|sqlglot.optimizer.Schema): Database schema.
      -24        annotators (dict): Maps expression type to corresponding annotation function.
      -25        coerces_to (dict): Maps expression type to set of types that it can be coerced into.
      -26    Returns:
      -27        sqlglot.Expression: expression annotated with types
      -28    """
      -29
      -30    schema = ensure_schema(schema)
      -31
      -32    return TypeAnnotator(schema, annotators, coerces_to).annotate(expression)
      +            
      16def annotate_types(
      +17    expression: E,
      +18    schema: t.Optional[t.Dict | Schema] = None,
      +19    annotators: t.Optional[t.Dict[t.Type[E], t.Callable[[TypeAnnotator, E], E]]] = None,
      +20    coerces_to: t.Optional[t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]]] = None,
      +21) -> E:
      +22    """
      +23    Infers the types of an expression, annotating its AST accordingly.
      +24
      +25    Example:
      +26        >>> import sqlglot
      +27        >>> schema = {"y": {"cola": "SMALLINT"}}
      +28        >>> sql = "SELECT x.cola + 2.5 AS cola FROM (SELECT y.cola AS cola FROM y AS y) AS x"
      +29        >>> annotated_expr = annotate_types(sqlglot.parse_one(sql), schema=schema)
      +30        >>> annotated_expr.expressions[0].type.this  # Get the type of "x.cola + 2.5 AS cola"
      +31        <Type.DOUBLE: 'DOUBLE'>
      +32
      +33    Args:
      +34        expression: Expression to annotate.
      +35        schema: Database schema.
      +36        annotators: Maps expression type to corresponding annotation function.
      +37        coerces_to: Maps expression type to set of types that it can be coerced into.
      +38
      +39    Returns:
      +40        The expression annotated with types.
      +41    """
      +42
      +43    schema = ensure_schema(schema)
      +44
      +45    return TypeAnnotator(schema, annotators, coerces_to).annotate(expression)
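A small sketch of the new signature in use with no schema at all, exercising `_annotate_literal`: string literals come back as VARCHAR, integer literals as INT, and other numeric literals as DOUBLE.

import sqlglot
from sqlglot import exp
from sqlglot.optimizer.annotate_types import annotate_types

expr = annotate_types(sqlglot.parse_one("SELECT 'x' AS a, 1 AS b, 2.5 AS c"))

assert [e.type.this for e in expr.expressions] == [
    exp.DataType.Type.VARCHAR,  # 'x'
    exp.DataType.Type.INT,      # 1
    exp.DataType.Type.DOUBLE,   # 2.5
]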
       
-Recursively infer & annotate types in an expression syntax tree against a schema.
-Assumes that we've already executed the optimizer's qualify_columns step.
+Infers the types of an expression, annotating its AST accordingly.

Example:
      @@ -531,16 +525,16 @@ Assumes that we've already executed the optimizer's qualify_columns step.

Arguments:
-  • expression (sqlglot.Expression): Expression to annotate.
-  • schema (dict|sqlglot.optimizer.Schema): Database schema.
-  • annotators (dict): Maps expression type to corresponding annotation function.
-  • coerces_to (dict): Maps expression type to set of types that it can be coerced into.
+  • expression: Expression to annotate.
+  • schema: Database schema.
+  • annotators: Maps expression type to corresponding annotation function.
+  • coerces_to: Maps expression type to set of types that it can be coerced into.

Returns:
-  sqlglot.Expression: expression annotated with types
+  The expression annotated with types.
      @@ -557,373 +551,307 @@ Assumes that we've already executed the optimizer's qualify_columns step.

      -
       35class TypeAnnotator:
      - 36    ANNOTATORS = {
      - 37        **{
      - 38            expr_type: lambda self, expr: self._annotate_unary(expr)
      - 39            for expr_type in subclasses(exp.__name__, exp.Unary)
      - 40        },
      - 41        **{
      - 42            expr_type: lambda self, expr: self._annotate_binary(expr)
      - 43            for expr_type in subclasses(exp.__name__, exp.Binary)
      - 44        },
      - 45        exp.Cast: lambda self, expr: self._annotate_with_type(expr, expr.args["to"]),
      - 46        exp.TryCast: lambda self, expr: self._annotate_with_type(expr, expr.args["to"]),
      - 47        exp.DataType: lambda self, expr: self._annotate_with_type(expr, expr.copy()),
      - 48        exp.Alias: lambda self, expr: self._annotate_unary(expr),
      - 49        exp.Between: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
      - 50        exp.In: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
      - 51        exp.Literal: lambda self, expr: self._annotate_literal(expr),
      - 52        exp.Boolean: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
      - 53        exp.Null: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.NULL),
      - 54        exp.Anonymous: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.UNKNOWN),
      - 55        exp.ApproxDistinct: lambda self, expr: self._annotate_with_type(
      - 56            expr, exp.DataType.Type.BIGINT
      - 57        ),
      - 58        exp.Avg: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      - 59        exp.Min: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"),
      - 60        exp.Max: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"),
      - 61        exp.Sum: lambda self, expr: self._annotate_by_args(
      - 62            expr, "this", "expressions", promote=True
      - 63        ),
      - 64        exp.Ceil: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 65        exp.Count: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT),
      - 66        exp.CurrentDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      - 67        exp.CurrentDatetime: lambda self, expr: self._annotate_with_type(
      - 68            expr, exp.DataType.Type.DATETIME
      - 69        ),
      - 70        exp.CurrentTime: lambda self, expr: self._annotate_with_type(
      - 71            expr, exp.DataType.Type.TIMESTAMP
      - 72        ),
      - 73        exp.CurrentTimestamp: lambda self, expr: self._annotate_with_type(
      - 74            expr, exp.DataType.Type.TIMESTAMP
      - 75        ),
      - 76        exp.DateAdd: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      - 77        exp.DateSub: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      - 78        exp.DateDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 79        exp.DatetimeAdd: lambda self, expr: self._annotate_with_type(
      - 80            expr, exp.DataType.Type.DATETIME
      - 81        ),
      - 82        exp.DatetimeSub: lambda self, expr: self._annotate_with_type(
      - 83            expr, exp.DataType.Type.DATETIME
      - 84        ),
      - 85        exp.DatetimeDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 86        exp.Extract: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 87        exp.TimestampAdd: lambda self, expr: self._annotate_with_type(
      - 88            expr, exp.DataType.Type.TIMESTAMP
      - 89        ),
      - 90        exp.TimestampSub: lambda self, expr: self._annotate_with_type(
      - 91            expr, exp.DataType.Type.TIMESTAMP
      - 92        ),
      - 93        exp.TimestampDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 94        exp.TimeAdd: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TIMESTAMP),
      - 95        exp.TimeSub: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TIMESTAMP),
      - 96        exp.TimeDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      - 97        exp.DateStrToDate: lambda self, expr: self._annotate_with_type(
      - 98            expr, exp.DataType.Type.DATE
      - 99        ),
      -100        exp.DateToDateStr: lambda self, expr: self._annotate_with_type(
      -101            expr, exp.DataType.Type.VARCHAR
      -102        ),
      -103        exp.DateToDi: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      -104        exp.Day: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
      -105        exp.DiToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      -106        exp.Exp: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -107        exp.Floor: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      -108        exp.Case: lambda self, expr: self._annotate_by_args(expr, "default", "ifs"),
      -109        exp.If: lambda self, expr: self._annotate_by_args(expr, "true", "false"),
      -110        exp.Coalesce: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"),
      -111        exp.IfNull: lambda self, expr: self._annotate_by_args(expr, "this", "expression"),
      -112        exp.Concat: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -113        exp.ConcatWs: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -114        exp.GroupConcat: lambda self, expr: self._annotate_with_type(
      -115            expr, exp.DataType.Type.VARCHAR
      -116        ),
      -117        exp.ArrayConcat: lambda self, expr: self._annotate_with_type(
      -118            expr, exp.DataType.Type.VARCHAR
      -119        ),
      -120        exp.ArraySize: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT),
      -121        exp.Map: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.MAP),
      -122        exp.VarMap: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.MAP),
      -123        exp.Initcap: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -124        exp.Interval: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INTERVAL),
      -125        exp.Least: lambda self, expr: self._annotate_by_args(expr, "expressions"),
      -126        exp.Length: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT),
      -127        exp.Levenshtein: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      -128        exp.Ln: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -129        exp.Log: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -130        exp.Log2: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -131        exp.Log10: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -132        exp.Lower: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -133        exp.Month: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
      -134        exp.Pow: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -135        exp.Quantile: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -136        exp.ApproxQuantile: lambda self, expr: self._annotate_with_type(
      -137            expr, exp.DataType.Type.DOUBLE
      -138        ),
      -139        exp.RegexpLike: lambda self, expr: self._annotate_with_type(
      -140            expr, exp.DataType.Type.BOOLEAN
      -141        ),
      -142        exp.Round: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -143        exp.SafeDivide: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -144        exp.Substring: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -145        exp.StrPosition: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      -146        exp.StrToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      -147        exp.StrToTime: lambda self, expr: self._annotate_with_type(
      -148            expr, exp.DataType.Type.TIMESTAMP
      -149        ),
      -150        exp.Sqrt: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -151        exp.Stddev: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -152        exp.StddevPop: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -153        exp.StddevSamp: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -154        exp.TimeToStr: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -155        exp.TimeToTimeStr: lambda self, expr: self._annotate_with_type(
      -156            expr, exp.DataType.Type.VARCHAR
      -157        ),
      -158        exp.TimeStrToDate: lambda self, expr: self._annotate_with_type(
      -159            expr, exp.DataType.Type.DATE
      -160        ),
      -161        exp.TimeStrToTime: lambda self, expr: self._annotate_with_type(
      -162            expr, exp.DataType.Type.TIMESTAMP
      -163        ),
      -164        exp.Trim: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -165        exp.TsOrDsToDateStr: lambda self, expr: self._annotate_with_type(
      -166            expr, exp.DataType.Type.VARCHAR
      -167        ),
      -168        exp.TsOrDsToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
      -169        exp.TsOrDiToDi: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
      -170        exp.UnixToStr: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -171        exp.UnixToTime: lambda self, expr: self._annotate_with_type(
      -172            expr, exp.DataType.Type.TIMESTAMP
      -173        ),
      -174        exp.UnixToTimeStr: lambda self, expr: self._annotate_with_type(
      -175            expr, exp.DataType.Type.VARCHAR
      -176        ),
      -177        exp.Upper: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
      -178        exp.Variance: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
      -179        exp.VariancePop: lambda self, expr: self._annotate_with_type(
      -180            expr, exp.DataType.Type.DOUBLE
      -181        ),
      -182        exp.Week: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
      -183        exp.Year: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
      -184    }
      -185
      -186    # Reference: https://spark.apache.org/docs/3.2.0/sql-ref-ansi-compliance.html
      -187    COERCES_TO = {
      -188        # CHAR < NCHAR < VARCHAR < NVARCHAR < TEXT
      -189        exp.DataType.Type.TEXT: set(),
      -190        exp.DataType.Type.NVARCHAR: {exp.DataType.Type.TEXT},
      -191        exp.DataType.Type.VARCHAR: {exp.DataType.Type.NVARCHAR, exp.DataType.Type.TEXT},
      -192        exp.DataType.Type.NCHAR: {
      -193            exp.DataType.Type.VARCHAR,
      -194            exp.DataType.Type.NVARCHAR,
      -195            exp.DataType.Type.TEXT,
      -196        },
      -197        exp.DataType.Type.CHAR: {
      -198            exp.DataType.Type.NCHAR,
      -199            exp.DataType.Type.VARCHAR,
      -200            exp.DataType.Type.NVARCHAR,
      -201            exp.DataType.Type.TEXT,
      -202        },
      -203        # TINYINT < SMALLINT < INT < BIGINT < DECIMAL < FLOAT < DOUBLE
      -204        exp.DataType.Type.DOUBLE: set(),
      -205        exp.DataType.Type.FLOAT: {exp.DataType.Type.DOUBLE},
      -206        exp.DataType.Type.DECIMAL: {exp.DataType.Type.FLOAT, exp.DataType.Type.DOUBLE},
      -207        exp.DataType.Type.BIGINT: {
      -208            exp.DataType.Type.DECIMAL,
      -209            exp.DataType.Type.FLOAT,
      -210            exp.DataType.Type.DOUBLE,
      -211        },
      -212        exp.DataType.Type.INT: {
      -213            exp.DataType.Type.BIGINT,
      -214            exp.DataType.Type.DECIMAL,
      -215            exp.DataType.Type.FLOAT,
      -216            exp.DataType.Type.DOUBLE,
      -217        },
      -218        exp.DataType.Type.SMALLINT: {
      -219            exp.DataType.Type.INT,
      -220            exp.DataType.Type.BIGINT,
      -221            exp.DataType.Type.DECIMAL,
      -222            exp.DataType.Type.FLOAT,
      -223            exp.DataType.Type.DOUBLE,
      -224        },
      -225        exp.DataType.Type.TINYINT: {
      -226            exp.DataType.Type.SMALLINT,
      -227            exp.DataType.Type.INT,
      -228            exp.DataType.Type.BIGINT,
      -229            exp.DataType.Type.DECIMAL,
      -230            exp.DataType.Type.FLOAT,
      -231            exp.DataType.Type.DOUBLE,
      -232        },
      -233        # DATE < DATETIME < TIMESTAMP < TIMESTAMPTZ < TIMESTAMPLTZ
      -234        exp.DataType.Type.TIMESTAMPLTZ: set(),
      -235        exp.DataType.Type.TIMESTAMPTZ: {exp.DataType.Type.TIMESTAMPLTZ},
      -236        exp.DataType.Type.TIMESTAMP: {
      -237            exp.DataType.Type.TIMESTAMPTZ,
      -238            exp.DataType.Type.TIMESTAMPLTZ,
      -239        },
      -240        exp.DataType.Type.DATETIME: {
      -241            exp.DataType.Type.TIMESTAMP,
      -242            exp.DataType.Type.TIMESTAMPTZ,
      -243            exp.DataType.Type.TIMESTAMPLTZ,
      -244        },
      -245        exp.DataType.Type.DATE: {
      -246            exp.DataType.Type.DATETIME,
      -247            exp.DataType.Type.TIMESTAMP,
      -248            exp.DataType.Type.TIMESTAMPTZ,
      -249            exp.DataType.Type.TIMESTAMPLTZ,
      -250        },
      -251    }
      -252
      -253    TRAVERSABLES = (exp.Select, exp.Union, exp.UDTF, exp.Subquery)
      -254
      -255    def __init__(self, schema=None, annotators=None, coerces_to=None):
      -256        self.schema = schema
      -257        self.annotators = annotators or self.ANNOTATORS
      -258        self.coerces_to = coerces_to or self.COERCES_TO
      -259
      -260    def annotate(self, expression):
      -261        if isinstance(expression, self.TRAVERSABLES):
      -262            for scope in traverse_scope(expression):
      -263                selects = {}
      -264                for name, source in scope.sources.items():
      -265                    if not isinstance(source, Scope):
      -266                        continue
      -267                    if isinstance(source.expression, exp.UDTF):
      -268                        values = []
      -269
      -270                        if isinstance(source.expression, exp.Lateral):
      -271                            if isinstance(source.expression.this, exp.Explode):
      -272                                values = [source.expression.this.this]
      -273                        else:
      -274                            values = source.expression.expressions[0].expressions
      -275
      -276                        if not values:
      -277                            continue
      -278
      -279                        selects[name] = {
      -280                            alias: column
      -281                            for alias, column in zip(
      -282                                source.expression.alias_column_names,
      -283                                values,
      -284                            )
      -285                        }
      -286                    else:
      -287                        selects[name] = {
      -288                            select.alias_or_name: select for select in source.expression.selects
      -289                        }
      -290                # First annotate the current scope's column references
      -291                for col in scope.columns:
      -292                    if not col.table:
      -293                        continue
      -294
      -295                    source = scope.sources.get(col.table)
      -296                    if isinstance(source, exp.Table):
      -297                        col.type = self.schema.get_column_type(source, col)
      -298                    elif source and col.table in selects and col.name in selects[col.table]:
      -299                        col.type = selects[col.table][col.name].type
      -300                # Then (possibly) annotate the remaining expressions in the scope
      -301                self._maybe_annotate(scope.expression)
      -302        return self._maybe_annotate(expression)  # This takes care of non-traversable expressions
      -303
      -304    def _maybe_annotate(self, expression):
      -305        if expression.type:
      -306            return expression  # We've already inferred the expression's type
      -307
      -308        annotator = self.annotators.get(expression.__class__)
      -309
      -310        return (
      -311            annotator(self, expression)
      -312            if annotator
      -313            else self._annotate_with_type(expression, exp.DataType.Type.UNKNOWN)
      -314        )
      +            
       91class TypeAnnotator(metaclass=_TypeAnnotator):
      + 92    TYPE_TO_EXPRESSIONS: t.Dict[exp.DataType.Type, t.Set[t.Type[exp.Expression]]] = {
      + 93        exp.DataType.Type.BIGINT: {
      + 94            exp.ApproxDistinct,
      + 95            exp.ArraySize,
      + 96            exp.Count,
      + 97            exp.Length,
      + 98        },
      + 99        exp.DataType.Type.BOOLEAN: {
      +100            exp.Between,
      +101            exp.Boolean,
      +102            exp.In,
      +103            exp.RegexpLike,
      +104        },
      +105        exp.DataType.Type.DATE: {
      +106            exp.CurrentDate,
      +107            exp.Date,
      +108            exp.DateAdd,
      +109            exp.DateStrToDate,
      +110            exp.DateSub,
      +111            exp.DateTrunc,
      +112            exp.DiToDate,
      +113            exp.StrToDate,
      +114            exp.TimeStrToDate,
      +115            exp.TsOrDsToDate,
      +116        },
      +117        exp.DataType.Type.DATETIME: {
      +118            exp.CurrentDatetime,
      +119            exp.DatetimeAdd,
      +120            exp.DatetimeSub,
      +121        },
      +122        exp.DataType.Type.DOUBLE: {
      +123            exp.ApproxQuantile,
      +124            exp.Avg,
      +125            exp.Exp,
      +126            exp.Ln,
      +127            exp.Log,
      +128            exp.Log2,
      +129            exp.Log10,
      +130            exp.Pow,
      +131            exp.Quantile,
      +132            exp.Round,
      +133            exp.SafeDivide,
      +134            exp.Sqrt,
      +135            exp.Stddev,
      +136            exp.StddevPop,
      +137            exp.StddevSamp,
      +138            exp.Variance,
      +139            exp.VariancePop,
      +140        },
      +141        exp.DataType.Type.INT: {
      +142            exp.Ceil,
      +143            exp.DateDiff,
      +144            exp.DatetimeDiff,
      +145            exp.Extract,
      +146            exp.TimestampDiff,
      +147            exp.TimeDiff,
      +148            exp.DateToDi,
      +149            exp.Floor,
      +150            exp.Levenshtein,
      +151            exp.StrPosition,
      +152            exp.TsOrDiToDi,
      +153        },
      +154        exp.DataType.Type.TIMESTAMP: {
      +155            exp.CurrentTime,
      +156            exp.CurrentTimestamp,
      +157            exp.StrToTime,
      +158            exp.TimeAdd,
      +159            exp.TimeStrToTime,
      +160            exp.TimeSub,
      +161            exp.TimestampAdd,
      +162            exp.TimestampSub,
      +163            exp.UnixToTime,
      +164        },
      +165        exp.DataType.Type.TINYINT: {
      +166            exp.Day,
      +167            exp.Month,
      +168            exp.Week,
      +169            exp.Year,
      +170        },
      +171        exp.DataType.Type.VARCHAR: {
      +172            exp.ArrayConcat,
      +173            exp.Concat,
      +174            exp.ConcatWs,
      +175            exp.DateToDateStr,
      +176            exp.GroupConcat,
      +177            exp.Initcap,
      +178            exp.Lower,
      +179            exp.SafeConcat,
      +180            exp.Substring,
      +181            exp.TimeToStr,
      +182            exp.TimeToTimeStr,
      +183            exp.Trim,
      +184            exp.TsOrDsToDateStr,
      +185            exp.UnixToStr,
      +186            exp.UnixToTimeStr,
      +187            exp.Upper,
      +188        },
      +189    }
      +190
      +191    ANNOTATORS = {
      +192        **{
      +193            expr_type: lambda self, e: self._annotate_unary(e)
      +194            for expr_type in subclasses(exp.__name__, (exp.Unary, exp.Alias))
      +195        },
      +196        **{
      +197            expr_type: lambda self, e: self._annotate_binary(e)
      +198            for expr_type in subclasses(exp.__name__, exp.Binary)
      +199        },
      +200        **{
      +201            expr_type: _annotate_with_type_lambda(data_type)
      +202            for data_type, expressions in TYPE_TO_EXPRESSIONS.items()
      +203            for expr_type in expressions
      +204        },
      +205        exp.Anonymous: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.UNKNOWN),
      +206        exp.Cast: lambda self, e: self._annotate_with_type(e, e.args["to"]),
      +207        exp.Case: lambda self, e: self._annotate_by_args(e, "default", "ifs"),
      +208        exp.Coalesce: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
      +209        exp.DataType: lambda self, e: self._annotate_with_type(e, e.copy()),
      +210        exp.If: lambda self, e: self._annotate_by_args(e, "true", "false"),
      +211        exp.Interval: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.INTERVAL),
      +212        exp.Least: lambda self, e: self._annotate_by_args(e, "expressions"),
      +213        exp.Literal: lambda self, e: self._annotate_literal(e),
      +214        exp.Map: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.MAP),
      +215        exp.Max: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
      +216        exp.Min: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
      +217        exp.Null: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.NULL),
      +218        exp.Sum: lambda self, e: self._annotate_by_args(e, "this", "expressions", promote=True),
      +219        exp.TryCast: lambda self, e: self._annotate_with_type(e, e.args["to"]),
      +220        exp.VarMap: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.MAP),
      +221    }
      +222
      +223    # Specifies what types a given type can be coerced into (autofilled)
      +224    COERCES_TO: t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]] = {}
      +225
      +226    def __init__(
      +227        self,
      +228        schema: Schema,
      +229        annotators: t.Optional[t.Dict[t.Type[E], t.Callable[[TypeAnnotator, E], E]]] = None,
      +230        coerces_to: t.Optional[t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]]] = None,
      +231    ) -> None:
      +232        self.schema = schema
      +233        self.annotators = annotators or self.ANNOTATORS
      +234        self.coerces_to = coerces_to or self.COERCES_TO
      +235
      +236    def annotate(self, expression: E) -> E:
      +237        for scope in traverse_scope(expression):
      +238            selects = {}
      +239            for name, source in scope.sources.items():
      +240                if not isinstance(source, Scope):
      +241                    continue
      +242                if isinstance(source.expression, exp.UDTF):
      +243                    values = []
      +244
      +245                    if isinstance(source.expression, exp.Lateral):
      +246                        if isinstance(source.expression.this, exp.Explode):
      +247                            values = [source.expression.this.this]
      +248                    else:
      +249                        values = source.expression.expressions[0].expressions
      +250
      +251                    if not values:
      +252                        continue
      +253
      +254                    selects[name] = {
      +255                        alias: column
      +256                        for alias, column in zip(
      +257                            source.expression.alias_column_names,
      +258                            values,
      +259                        )
      +260                    }
      +261                else:
      +262                    selects[name] = {
      +263                        select.alias_or_name: select for select in source.expression.selects
      +264                    }
      +265
      +266            # First annotate the current scope's column references
      +267            for col in scope.columns:
      +268                if not col.table:
      +269                    continue
      +270
      +271                source = scope.sources.get(col.table)
      +272                if isinstance(source, exp.Table):
      +273                    col.type = self.schema.get_column_type(source, col)
      +274                elif source and col.table in selects and col.name in selects[col.table]:
      +275                    col.type = selects[col.table][col.name].type
      +276
      +277            # Then (possibly) annotate the remaining expressions in the scope
      +278            self._maybe_annotate(scope.expression)
      +279
      +280        return self._maybe_annotate(expression)  # This takes care of non-traversable expressions
      +281
      +282    def _maybe_annotate(self, expression: E) -> E:
      +283        if expression.type:
      +284            return expression  # We've already inferred the expression's type
      +285
      +286        annotator = self.annotators.get(expression.__class__)
      +287
      +288        return (
      +289            annotator(self, expression)
      +290            if annotator
      +291            else self._annotate_with_type(expression, exp.DataType.Type.UNKNOWN)
      +292        )
      +293
      +294    def _annotate_args(self, expression: E) -> E:
      +295        for _, value in expression.iter_expressions():
      +296            self._maybe_annotate(value)
      +297
      +298        return expression
      +299
      +300    def _maybe_coerce(
      +301        self, type1: exp.DataType | exp.DataType.Type, type2: exp.DataType | exp.DataType.Type
      +302    ) -> exp.DataType.Type:
      +303        # We propagate the NULL / UNKNOWN types upwards if found
      +304        if isinstance(type1, exp.DataType):
      +305            type1 = type1.this
      +306        if isinstance(type2, exp.DataType):
      +307            type2 = type2.this
      +308
      +309        if exp.DataType.Type.NULL in (type1, type2):
      +310            return exp.DataType.Type.NULL
      +311        if exp.DataType.Type.UNKNOWN in (type1, type2):
      +312            return exp.DataType.Type.UNKNOWN
      +313
      +314        return type2 if type2 in self.coerces_to.get(type1, {}) else type1  # type: ignore
       315
      -316    def _annotate_args(self, expression):
      -317        for _, value in expression.iter_expressions():
      -318            self._maybe_annotate(value)
      +316    # Note: the following "no_type_check" decorators were added because mypy was yelling due
      +317    # to assigning Type values to expression.type (since its getter returns Optional[DataType]).
      +318    # This is a known mypy issue: https://github.com/python/mypy/issues/3004
       319
      -320        return expression
      -321
      -322    def _maybe_coerce(self, type1, type2):
      -323        # We propagate the NULL / UNKNOWN types upwards if found
      -324        if isinstance(type1, exp.DataType):
      -325            type1 = type1.this
      -326        if isinstance(type2, exp.DataType):
      -327            type2 = type2.this
      -328
      -329        if exp.DataType.Type.NULL in (type1, type2):
      -330            return exp.DataType.Type.NULL
      -331        if exp.DataType.Type.UNKNOWN in (type1, type2):
      -332            return exp.DataType.Type.UNKNOWN
      -333
      -334        return type2 if type2 in self.coerces_to.get(type1, {}) else type1
      -335
      -336    def _annotate_binary(self, expression):
      -337        self._annotate_args(expression)
      -338
      -339        left_type = expression.left.type.this
      -340        right_type = expression.right.type.this
      -341
      -342        if isinstance(expression, exp.Connector):
      -343            if left_type == exp.DataType.Type.NULL and right_type == exp.DataType.Type.NULL:
      -344                expression.type = exp.DataType.Type.NULL
      -345            elif exp.DataType.Type.NULL in (left_type, right_type):
      -346                expression.type = exp.DataType.build(
      -347                    "NULLABLE", expressions=exp.DataType.build("BOOLEAN")
      -348                )
      -349            else:
      -350                expression.type = exp.DataType.Type.BOOLEAN
      -351        elif isinstance(expression, exp.Predicate):
      -352            expression.type = exp.DataType.Type.BOOLEAN
      -353        else:
      -354            expression.type = self._maybe_coerce(left_type, right_type)
      -355
      -356        return expression
      -357
      -358    def _annotate_unary(self, expression):
      -359        self._annotate_args(expression)
      -360
      -361        if isinstance(expression, exp.Condition) and not isinstance(expression, exp.Paren):
      -362            expression.type = exp.DataType.Type.BOOLEAN
      -363        else:
      -364            expression.type = expression.this.type
      -365
      -366        return expression
      -367
      -368    def _annotate_literal(self, expression):
      -369        if expression.is_string:
      -370            expression.type = exp.DataType.Type.VARCHAR
      -371        elif expression.is_int:
      -372            expression.type = exp.DataType.Type.INT
      -373        else:
      -374            expression.type = exp.DataType.Type.DOUBLE
      -375
      -376        return expression
      -377
      -378    def _annotate_with_type(self, expression, target_type):
      -379        expression.type = target_type
      -380        return self._annotate_args(expression)
      -381
      -382    def _annotate_by_args(self, expression, *args, promote=False):
      -383        self._annotate_args(expression)
      -384        expressions = []
      -385        for arg in args:
      -386            arg_expr = expression.args.get(arg)
      -387            expressions.extend(expr for expr in ensure_list(arg_expr) if expr)
      -388
      -389        last_datatype = None
      -390        for expr in expressions:
      -391            last_datatype = self._maybe_coerce(last_datatype or expr.type, expr.type)
      -392
      -393        expression.type = last_datatype or exp.DataType.Type.UNKNOWN
      -394
      -395        if promote:
      -396            if expression.type.this in exp.DataType.INTEGER_TYPES:
      -397                expression.type = exp.DataType.Type.BIGINT
      -398            elif expression.type.this in exp.DataType.FLOAT_TYPES:
      -399                expression.type = exp.DataType.Type.DOUBLE
      -400
      -401        return expression
      +320    @t.no_type_check
      +321    def _annotate_binary(self, expression: B) -> B:
      +322        self._annotate_args(expression)
      +323
      +324        left_type = expression.left.type.this
      +325        right_type = expression.right.type.this
      +326
      +327        if isinstance(expression, exp.Connector):
      +328            if left_type == exp.DataType.Type.NULL and right_type == exp.DataType.Type.NULL:
      +329                expression.type = exp.DataType.Type.NULL
      +330            elif exp.DataType.Type.NULL in (left_type, right_type):
      +331                expression.type = exp.DataType.build(
      +332                    "NULLABLE", expressions=exp.DataType.build("BOOLEAN")
      +333                )
      +334            else:
      +335                expression.type = exp.DataType.Type.BOOLEAN
      +336        elif isinstance(expression, exp.Predicate):
      +337            expression.type = exp.DataType.Type.BOOLEAN
      +338        else:
      +339            expression.type = self._maybe_coerce(left_type, right_type)
      +340
      +341        return expression
      +342
      +343    @t.no_type_check
      +344    def _annotate_unary(self, expression: E) -> E:
      +345        self._annotate_args(expression)
      +346
      +347        if isinstance(expression, exp.Condition) and not isinstance(expression, exp.Paren):
      +348            expression.type = exp.DataType.Type.BOOLEAN
      +349        else:
      +350            expression.type = expression.this.type
      +351
      +352        return expression
      +353
      +354    @t.no_type_check
      +355    def _annotate_literal(self, expression: exp.Literal) -> exp.Literal:
      +356        if expression.is_string:
      +357            expression.type = exp.DataType.Type.VARCHAR
      +358        elif expression.is_int:
      +359            expression.type = exp.DataType.Type.INT
      +360        else:
      +361            expression.type = exp.DataType.Type.DOUBLE
      +362
      +363        return expression
      +364
      +365    @t.no_type_check
      +366    def _annotate_with_type(self, expression: E, target_type: exp.DataType.Type) -> E:
      +367        expression.type = target_type
      +368        return self._annotate_args(expression)
      +369
      +370    @t.no_type_check
      +371    def _annotate_by_args(self, expression: E, *args: str, promote: bool = False) -> E:
      +372        self._annotate_args(expression)
      +373
      +374        expressions: t.List[exp.Expression] = []
      +375        for arg in args:
      +376            arg_expr = expression.args.get(arg)
      +377            expressions.extend(expr for expr in ensure_list(arg_expr) if expr)
      +378
      +379        last_datatype = None
      +380        for expr in expressions:
      +381            last_datatype = self._maybe_coerce(last_datatype or expr.type, expr.type)
      +382
      +383        expression.type = last_datatype or exp.DataType.Type.UNKNOWN
      +384
      +385        if promote:
      +386            if expression.type.this in exp.DataType.INTEGER_TYPES:
      +387                expression.type = exp.DataType.Type.BIGINT
      +388            elif expression.type.this in exp.DataType.FLOAT_TYPES:
      +389                expression.type = exp.DataType.Type.DOUBLE
      +390
      +391        return expression
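Because `annotators` only falls back to `ANNOTATORS` when not supplied, callers can layer overrides on top of the defaults. A hedged sketch (the UDF name is made up, and typing anonymous functions as VARCHAR is purely illustrative):

import sqlglot
from sqlglot import exp
from sqlglot.optimizer.annotate_types import TypeAnnotator, annotate_types

# Start from the default annotators and override only exp.Anonymous.
custom = {
    **TypeAnnotator.ANNOTATORS,
    exp.Anonymous: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
}

expr = annotate_types(sqlglot.parse_one("SELECT SOME_UDF('x') AS c"), annotators=custom)
assert expr.expressions[0].type.this == exp.DataType.Type.VARCHAR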
       
      @@ -933,16 +861,21 @@ Assumes that we've already executed the optimizer's qualify_columns step.

-TypeAnnotator(schema=None, annotators=None, coerces_to=None)
+TypeAnnotator( schema: sqlglot.schema.Schema, annotators: Optional[Dict[Type[~E], Callable[[sqlglot.optimizer.annotate_types.TypeAnnotator, ~E], ~E]]] = None, coerces_to: Optional[Dict[sqlglot.expressions.DataType.Type, Set[sqlglot.expressions.DataType.Type]]] = None)
      -
      255    def __init__(self, schema=None, annotators=None, coerces_to=None):
      -256        self.schema = schema
      -257        self.annotators = annotators or self.ANNOTATORS
      -258        self.coerces_to = coerces_to or self.COERCES_TO
      +            
      226    def __init__(
      +227        self,
      +228        schema: Schema,
      +229        annotators: t.Optional[t.Dict[t.Type[E], t.Callable[[TypeAnnotator, E], E]]] = None,
      +230        coerces_to: t.Optional[t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]]] = None,
      +231    ) -> None:
      +232        self.schema = schema
      +233        self.annotators = annotators or self.ANNOTATORS
      +234        self.coerces_to = coerces_to or self.COERCES_TO
       
      @@ -954,55 +887,57 @@ Assumes that we've already executed the optimizer's qualify_columns step.

-def annotate(self, expression):
+def annotate(self, expression: ~E) -> ~E:
      -
      260    def annotate(self, expression):
      -261        if isinstance(expression, self.TRAVERSABLES):
      -262            for scope in traverse_scope(expression):
      -263                selects = {}
      -264                for name, source in scope.sources.items():
      -265                    if not isinstance(source, Scope):
      -266                        continue
      -267                    if isinstance(source.expression, exp.UDTF):
      -268                        values = []
      -269
      -270                        if isinstance(source.expression, exp.Lateral):
      -271                            if isinstance(source.expression.this, exp.Explode):
      -272                                values = [source.expression.this.this]
      -273                        else:
      -274                            values = source.expression.expressions[0].expressions
      -275
      -276                        if not values:
      -277                            continue
      -278
      -279                        selects[name] = {
      -280                            alias: column
      -281                            for alias, column in zip(
      -282                                source.expression.alias_column_names,
      -283                                values,
      -284                            )
      -285                        }
      -286                    else:
      -287                        selects[name] = {
      -288                            select.alias_or_name: select for select in source.expression.selects
      -289                        }
      -290                # First annotate the current scope's column references
      -291                for col in scope.columns:
      -292                    if not col.table:
      -293                        continue
      -294
      -295                    source = scope.sources.get(col.table)
      -296                    if isinstance(source, exp.Table):
      -297                        col.type = self.schema.get_column_type(source, col)
      -298                    elif source and col.table in selects and col.name in selects[col.table]:
      -299                        col.type = selects[col.table][col.name].type
      -300                # Then (possibly) annotate the remaining expressions in the scope
      -301                self._maybe_annotate(scope.expression)
      -302        return self._maybe_annotate(expression)  # This takes care of non-traversable expressions
      +            
      236    def annotate(self, expression: E) -> E:
      +237        for scope in traverse_scope(expression):
      +238            selects = {}
      +239            for name, source in scope.sources.items():
      +240                if not isinstance(source, Scope):
      +241                    continue
      +242                if isinstance(source.expression, exp.UDTF):
      +243                    values = []
      +244
      +245                    if isinstance(source.expression, exp.Lateral):
      +246                        if isinstance(source.expression.this, exp.Explode):
      +247                            values = [source.expression.this.this]
      +248                    else:
      +249                        values = source.expression.expressions[0].expressions
      +250
      +251                    if not values:
      +252                        continue
      +253
      +254                    selects[name] = {
      +255                        alias: column
      +256                        for alias, column in zip(
      +257                            source.expression.alias_column_names,
      +258                            values,
      +259                        )
      +260                    }
      +261                else:
      +262                    selects[name] = {
      +263                        select.alias_or_name: select for select in source.expression.selects
      +264                    }
      +265
      +266            # First annotate the current scope's column references
      +267            for col in scope.columns:
      +268                if not col.table:
      +269                    continue
      +270
      +271                source = scope.sources.get(col.table)
      +272                if isinstance(source, exp.Table):
      +273                    col.type = self.schema.get_column_type(source, col)
      +274                elif source and col.table in selects and col.name in selects[col.table]:
      +275                    col.type = selects[col.table][col.name].type
      +276
      +277            # Then (possibly) annotate the remaining expressions in the scope
      +278            self._maybe_annotate(scope.expression)
      +279
      +280        return self._maybe_annotate(expression)  # This takes care of non-traversable expressions
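To illustrate the scope handling in `annotate` (types flow from a derived table's projections into the outer scope through the `selects` mapping), a minimal sketch with an illustrative schema:

import sqlglot
from sqlglot import exp
from sqlglot.optimizer.annotate_types import annotate_types

schema = {"t": {"a": "BIGINT"}}
sql = "SELECT x.c AS c FROM (SELECT t.a + 1 AS c FROM t AS t) AS x"
expr = annotate_types(sqlglot.parse_one(sql), schema=schema)

# The inner projection stays BIGINT (the INT literal does not widen it), and the
# outer reference x.c picks that type up from the inner scope's selects.
assert expr.expressions[0].type.this == exp.DataType.Type.BIGINT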
       
diff --git a/docs/sqlglot/optimizer/canonicalize.html b/docs/sqlglot/optimizer/canonicalize.html
index 28344cf..b5956e6 100644
--- a/docs/sqlglot/optimizer/canonicalize.html
+++ b/docs/sqlglot/optimizer/canonicalize.html
@@ -96,7 +96,7 @@
 26
 27def add_text_to_concat(node: exp.Expression) -> exp.Expression:
 28    if isinstance(node, exp.Add) and node.type and node.type.this in exp.DataType.TEXT_TYPES:
-29        node = exp.Concat(this=node.this, expression=node.expression)
+29        node = exp.Concat(expressions=[node.left, node.right])
 30    return node
 31
 32
@@ -216,7 +216,7 @@ conversions rely on type inference.

      28def add_text_to_concat(node: exp.Expression) -> exp.Expression:
       29    if isinstance(node, exp.Add) and node.type and node.type.this in exp.DataType.TEXT_TYPES:
      -30        node = exp.Concat(this=node.this, expression=node.expression)
      +30        node = exp.Concat(expressions=[node.left, node.right])
       31    return node
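The change above reflects `exp.Concat` now taking a list of `expressions` instead of a `this`/`expression` pair. A minimal sketch of the new constructor shape (column names are illustrative):

from sqlglot import exp

concat = exp.Concat(expressions=[exp.column("a"), exp.column("b")])
print(concat.sql())  # expected to render as CONCAT(a, b)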
       
diff --git a/docs/sqlglot/optimizer/isolate_table_selects.html b/docs/sqlglot/optimizer/isolate_table_selects.html
index a282222..588fc12 100644
--- a/docs/sqlglot/optimizer/isolate_table_selects.html
+++ b/docs/sqlglot/optimizer/isolate_table_selects.html
@@ -79,7 +79,7 @@
 21            source.replace(
 22                exp.select("*")
 23                .from_(
-24                    alias(source, source.name or source.alias, table=True),
+24                    alias(source, source.alias_or_name, table=True),
 25                    copy=False,
 26                )
 27                .subquery(source.alias, copy=False)
@@ -118,7 +118,7 @@
 22            source.replace(
 23                exp.select("*")
 24                .from_(
-25                    alias(source, source.name or source.alias, table=True),
+25                    alias(source, source.alias_or_name, table=True),
 26                    copy=False,
 27                )
 28                .subquery(source.alias, copy=False)
diff --git a/docs/sqlglot/optimizer/optimizer.html b/docs/sqlglot/optimizer/optimizer.html
index ee0c50d..1c9f314 100644
--- a/docs/sqlglot/optimizer/optimizer.html
+++ b/docs/sqlglot/optimizer/optimizer.html
@@ -136,7 +136,7 @@
 78        "schema": schema,
 79        "dialect": dialect,
 80        "isolate_tables": True,  # needed for other optimizations to perform well
-81        "quote_identifiers": False,  # this happens in canonicalize
+81        "quote_identifiers": False,
 82        **kwargs,
 83    }
 84
@@ -159,7 +159,7 @@
 def
- optimize( expression: str | sqlglot.expressions.Expression, schema: Union[dict, sqlglot.schema.Schema, NoneType] = None, db: Optional[str] = None, catalog: Optional[str] = None, dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, rules: Sequence[Callable] = (<function qualify at 0x7fe11813a560>, <function pushdown_projections at 0x7fe118139f30>, <function normalize at 0x7fe11814a0e0>, <function unnest_subqueries at 0x7fe11813a950>, <function pushdown_predicates at 0x7fe118138e50>, <function optimize_joins at 0x7fe118138af0>, <function eliminate_subqueries at 0x7fe11814bd90>, <function merge_subqueries at 0x7fe11814be20>, <function eliminate_joins at 0x7fe118149f30>, <function eliminate_ctes at 0x7fe118149e10>, <function quote_identifiers at 0x7fe118139ea0>, <function annotate_types at 0x7fe11818a560>, <function canonicalize at 0x7fe118149870>, <function simplify at 0x7fe11814a290>), **kwargs) -> sqlglot.expressions.Expression:
+ optimize( expression: str | sqlglot.expressions.Expression, schema: Union[dict, sqlglot.schema.Schema, NoneType] = None, db: Optional[str] = None, catalog: Optional[str] = None, dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, rules: Sequence[Callable] = (<function qualify at 0x7f5e613d37f0>, <function pushdown_projections at 0x7f5e613d31c0>, <function normalize at 0x7f5e614032e0>, <function unnest_subqueries at 0x7f5e613d3be0>, <function pushdown_predicates at 0x7f5e613d2050>, <function optimize_joins at 0x7f5e613d1cf0>, <function eliminate_subqueries at 0x7f5e613d0ee0>, <function merge_subqueries at 0x7f5e613d0f70>, <function eliminate_joins at 0x7f5e614031c0>, <function eliminate_ctes at 0x7f5e614030a0>, <function quote_identifiers at 0x7f5e613d3130>, <function annotate_types at 0x7f5e61453520>, <function canonicalize at 0x7f5e61402b00>, <function simplify at 0x7f5e61403490>), **kwargs) -> sqlglot.expressions.Expression:
@@ -204,7 +204,7 @@
 79        "schema": schema,
 80        "dialect": dialect,
 81        "isolate_tables": True,  # needed for other optimizations to perform well
-82        "quote_identifiers": False,  # this happens in canonicalize
+82        "quote_identifiers": False,
 83        **kwargs,
 84    }
 85
diff --git a/docs/sqlglot/optimizer/qualify_columns.html b/docs/sqlglot/optimizer/qualify_columns.html
index 2c3b003..bc32369 100644
--- a/docs/sqlglot/optimizer/qualify_columns.html
+++ b/docs/sqlglot/optimizer/qualify_columns.html
@@ -96,7 +96,7 @@
 14
 15def qualify_columns(
 16    expression: exp.Expression,
- 17    schema: dict | Schema,
+ 17    schema: t.Dict | Schema,
 18    expand_alias_refs: bool = True,
 19    infer_schema: t.Optional[bool] = None,
 20) -> exp.Expression:
@@ -175,7 +175,7 @@
 93
 94def _expand_using(scope, resolver):
 95    joins = list(scope.find_all(exp.Join))
- 96    names = {join.this.alias for join in joins}
+ 96    names = {join.this.alias_or_name for join in joins}
 97    ordered = [key for key in scope.selected_sources if key not in names]
 98
 99    # Mapping of automatically joined column names to an ordered set of source names (dict).
@@ -656,7 +656,7 @@
 def
- qualify_columns( expression: sqlglot.expressions.Expression, schema: dict | sqlglot.schema.Schema, expand_alias_refs: bool = True, infer_schema: Optional[bool] = None) -> sqlglot.expressions.Expression:
+ qualify_columns( expression: sqlglot.expressions.Expression, schema: Union[Dict, sqlglot.schema.Schema], expand_alias_refs: bool = True, infer_schema: Optional[bool] = None) -> sqlglot.expressions.Expression:
@@ -664,7 +664,7 @@
      16def qualify_columns(
       17    expression: exp.Expression,
      -18    schema: dict | Schema,
      +18    schema: t.Dict | Schema,
       19    expand_alias_refs: bool = True,
       20    infer_schema: t.Optional[bool] = None,
       21) -> exp.Expression:
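
A brief usage sketch for the signature above (illustrative only; the table and schema are invented), showing that a plain dict is accepted as the schema argument:

    import sqlglot
    from sqlglot.optimizer.qualify_columns import qualify_columns

    # Invented table and schema; a dict is converted to a Schema internally.
    expression = sqlglot.parse_one("SELECT y FROM t")
    print(qualify_columns(expression, schema={"t": {"y": "int"}}).sql())
    # expected to be along the lines of: SELECT t.y AS y FROM t
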
      diff --git a/docs/sqlglot/optimizer/qualify_tables.html b/docs/sqlglot/optimizer/qualify_tables.html
      index e3e53ea..01280ad 100644
      --- a/docs/sqlglot/optimizer/qualify_tables.html
      +++ b/docs/sqlglot/optimizer/qualify_tables.html
      @@ -149,16 +149,18 @@
        91                        )
        92            elif isinstance(source, Scope) and source.is_udtf:
        93                udtf = source.expression
      - 94                table_alias = udtf.args.get("alias") or exp.TableAlias(this=next_alias_name())
      - 95                udtf.set("alias", table_alias)
      - 96
      - 97                if not table_alias.name:
      - 98                    table_alias.set("this", next_alias_name())
      - 99                if isinstance(udtf, exp.Values) and not table_alias.columns:
      -100                    for i, e in enumerate(udtf.expressions[0].expressions):
      -101                        table_alias.append("columns", exp.to_identifier(f"_col_{i}"))
      -102
      -103    return expression
      + 94                table_alias = udtf.args.get("alias") or exp.TableAlias(
      + 95                    this=exp.to_identifier(next_alias_name())
      + 96                )
      + 97                udtf.set("alias", table_alias)
      + 98
      + 99                if not table_alias.name:
      +100                    table_alias.set("this", exp.to_identifier(next_alias_name()))
      +101                if isinstance(udtf, exp.Values) and not table_alias.columns:
      +102                    for i, e in enumerate(udtf.expressions[0].expressions):
      +103                        table_alias.append("columns", exp.to_identifier(f"_col_{i}"))
      +104
      +105    return expression
       
@@ -257,16 +259,18 @@
  92                        )
  93            elif isinstance(source, Scope) and source.is_udtf:
  94                udtf = source.expression
- 95                table_alias = udtf.args.get("alias") or exp.TableAlias(this=next_alias_name())
- 96                udtf.set("alias", table_alias)
- 97
- 98                if not table_alias.name:
- 99                    table_alias.set("this", next_alias_name())
-100                if isinstance(udtf, exp.Values) and not table_alias.columns:
-101                    for i, e in enumerate(udtf.expressions[0].expressions):
-102                        table_alias.append("columns", exp.to_identifier(f"_col_{i}"))
-103
-104    return expression
+ 95                table_alias = udtf.args.get("alias") or exp.TableAlias(
+ 96                    this=exp.to_identifier(next_alias_name())
+ 97                )
+ 98                udtf.set("alias", table_alias)
+ 99
+100                if not table_alias.name:
+101                    table_alias.set("this", exp.to_identifier(next_alias_name()))
+102                if isinstance(udtf, exp.Values) and not table_alias.columns:
+103                    for i, e in enumerate(udtf.expressions[0].expressions):
+104                        table_alias.append("columns", exp.to_identifier(f"_col_{i}"))
+105
+106    return expression
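
To illustrate the aliasing change above (sketch only; the query is invented and the exact generated names may vary between versions):

    import sqlglot
    from sqlglot.optimizer.qualify_tables import qualify_tables

    # An unaliased VALUES source should receive a generated, identifier-wrapped
    # table alias plus _col_N column names after qualification.
    expression = sqlglot.parse_one("SELECT * FROM (VALUES (1, 2))")
    print(qualify_tables(expression).sql())
    # expected to gain a generated table alias plus _col_0/_col_1 column names
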
      diff --git a/docs/sqlglot/optimizer/scope.html b/docs/sqlglot/optimizer/scope.html index 15605b3..d9eab11 100644 --- a/docs/sqlglot/optimizer/scope.html +++ b/docs/sqlglot/optimizer/scope.html @@ -810,7 +810,7 @@
      620 table_name = expression.name 621 source_name = expression.alias_or_name 622 -623 if table_name in scope.sources: +623 if table_name in scope.sources and not expression.db: 624 # This is a reference to a parent source (e.g. a CTE), not an actual table, unless 625 # it is pivoted, because then we get back a new table and hence a new source. 626 pivots = expression.args.get("pivots") diff --git a/docs/sqlglot/parser.html b/docs/sqlglot/parser.html index 6963320..8edbe48 100644 --- a/docs/sqlglot/parser.html +++ b/docs/sqlglot/parser.html @@ -100,199 +100,199 @@ 6 7from sqlglot import exp 8from sqlglot.errors import ErrorLevel, ParseError, concat_messages, merge_errors - 9from sqlglot.helper import apply_index_offset, ensure_collection, ensure_list, seq_get - 10from sqlglot.tokens import Token, Tokenizer, TokenType - 11from sqlglot.trie import in_trie, new_trie - 12 - 13if t.TYPE_CHECKING: - 14 from sqlglot._typing import E - 15 - 16logger = logging.getLogger("sqlglot") - 17 + 9from sqlglot.helper import apply_index_offset, ensure_list, seq_get + 10from sqlglot.time import format_time + 11from sqlglot.tokens import Token, Tokenizer, TokenType + 12from sqlglot.trie import in_trie, new_trie + 13 + 14if t.TYPE_CHECKING: + 15 from sqlglot._typing import E + 16 + 17logger = logging.getLogger("sqlglot") 18 - 19def parse_var_map(args: t.List) -> exp.StarMap | exp.VarMap: - 20 if len(args) == 1 and args[0].is_star: - 21 return exp.StarMap(this=args[0]) - 22 - 23 keys = [] - 24 values = [] - 25 for i in range(0, len(args), 2): - 26 keys.append(args[i]) - 27 values.append(args[i + 1]) - 28 return exp.VarMap( - 29 keys=exp.Array(expressions=keys), - 30 values=exp.Array(expressions=values), - 31 ) - 32 - 33 - 34def parse_like(args: t.List) -> exp.Expression: - 35 like = exp.Like(this=seq_get(args, 1), expression=seq_get(args, 0)) - 36 return exp.Escape(this=like, expression=seq_get(args, 2)) if len(args) > 2 else like - 37 - 38 - 39def binary_range_parser( - 40 expr_type: t.Type[exp.Expression], - 41) -> t.Callable[[Parser, t.Optional[exp.Expression]], t.Optional[exp.Expression]]: - 42 return lambda self, this: self._parse_escape( - 43 self.expression(expr_type, this=this, expression=self._parse_bitwise()) - 44 ) - 45 - 46 - 47class _Parser(type): - 48 def __new__(cls, clsname, bases, attrs): - 49 klass = super().__new__(cls, clsname, bases, attrs) - 50 klass._show_trie = new_trie(key.split(" ") for key in klass.SHOW_PARSERS) - 51 klass._set_trie = new_trie(key.split(" ") for key in klass.SET_PARSERS) + 19 + 20def parse_var_map(args: t.List) -> exp.StarMap | exp.VarMap: + 21 if len(args) == 1 and args[0].is_star: + 22 return exp.StarMap(this=args[0]) + 23 + 24 keys = [] + 25 values = [] + 26 for i in range(0, len(args), 2): + 27 keys.append(args[i]) + 28 values.append(args[i + 1]) + 29 + 30 return exp.VarMap( + 31 keys=exp.Array(expressions=keys), + 32 values=exp.Array(expressions=values), + 33 ) + 34 + 35 + 36def parse_like(args: t.List) -> exp.Escape | exp.Like: + 37 like = exp.Like(this=seq_get(args, 1), expression=seq_get(args, 0)) + 38 return exp.Escape(this=like, expression=seq_get(args, 2)) if len(args) > 2 else like + 39 + 40 + 41def binary_range_parser( + 42 expr_type: t.Type[exp.Expression], + 43) -> t.Callable[[Parser, t.Optional[exp.Expression]], t.Optional[exp.Expression]]: + 44 return lambda self, this: self._parse_escape( + 45 self.expression(expr_type, this=this, expression=self._parse_bitwise()) + 46 ) + 47 + 48 + 49class _Parser(type): + 50 def __new__(cls, clsname, bases, 
attrs): + 51 klass = super().__new__(cls, clsname, bases, attrs) 52 - 53 return klass - 54 + 53 klass.SHOW_TRIE = new_trie(key.split(" ") for key in klass.SHOW_PARSERS) + 54 klass.SET_TRIE = new_trie(key.split(" ") for key in klass.SET_PARSERS) 55 - 56class Parser(metaclass=_Parser): - 57 """ - 58 Parser consumes a list of tokens produced by the `sqlglot.tokens.Tokenizer` and produces - 59 a parsed syntax tree. - 60 - 61 Args: - 62 error_level: the desired error level. - 63 Default: ErrorLevel.IMMEDIATE - 64 error_message_context: determines the amount of context to capture from a - 65 query string when displaying the error message (in number of characters). - 66 Default: 50. - 67 index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. - 68 Default: 0 - 69 alias_post_tablesample: If the table alias comes after tablesample. - 70 Default: False - 71 max_errors: Maximum number of error messages to include in a raised ParseError. - 72 This is only relevant if error_level is ErrorLevel.RAISE. - 73 Default: 3 - 74 null_ordering: Indicates the default null ordering method to use if not explicitly set. - 75 Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". - 76 Default: "nulls_are_small" - 77 """ - 78 - 79 FUNCTIONS: t.Dict[str, t.Callable] = { - 80 **{name: f.from_arg_list for f in exp.ALL_FUNCTIONS for name in f.sql_names()}, - 81 "DATE_TO_DATE_STR": lambda args: exp.Cast( - 82 this=seq_get(args, 0), - 83 to=exp.DataType(this=exp.DataType.Type.TEXT), - 84 ), - 85 "GLOB": lambda args: exp.Glob(this=seq_get(args, 1), expression=seq_get(args, 0)), - 86 "IFNULL": exp.Coalesce.from_arg_list, - 87 "LIKE": parse_like, - 88 "TIME_TO_TIME_STR": lambda args: exp.Cast( - 89 this=seq_get(args, 0), - 90 to=exp.DataType(this=exp.DataType.Type.TEXT), - 91 ), - 92 "TS_OR_DS_TO_DATE_STR": lambda args: exp.Substring( - 93 this=exp.Cast( - 94 this=seq_get(args, 0), - 95 to=exp.DataType(this=exp.DataType.Type.TEXT), - 96 ), - 97 start=exp.Literal.number(1), - 98 length=exp.Literal.number(10), - 99 ), - 100 "VAR_MAP": parse_var_map, - 101 } - 102 - 103 NO_PAREN_FUNCTIONS = { - 104 TokenType.CURRENT_DATE: exp.CurrentDate, - 105 TokenType.CURRENT_DATETIME: exp.CurrentDate, - 106 TokenType.CURRENT_TIME: exp.CurrentTime, - 107 TokenType.CURRENT_TIMESTAMP: exp.CurrentTimestamp, - 108 TokenType.CURRENT_USER: exp.CurrentUser, - 109 } - 110 - 111 JOIN_HINTS: t.Set[str] = set() - 112 - 113 NESTED_TYPE_TOKENS = { - 114 TokenType.ARRAY, - 115 TokenType.MAP, - 116 TokenType.NULLABLE, - 117 TokenType.STRUCT, - 118 } - 119 - 120 TYPE_TOKENS = { - 121 TokenType.BIT, - 122 TokenType.BOOLEAN, - 123 TokenType.TINYINT, - 124 TokenType.UTINYINT, - 125 TokenType.SMALLINT, - 126 TokenType.USMALLINT, - 127 TokenType.INT, - 128 TokenType.UINT, - 129 TokenType.BIGINT, - 130 TokenType.UBIGINT, - 131 TokenType.INT128, - 132 TokenType.UINT128, - 133 TokenType.INT256, - 134 TokenType.UINT256, - 135 TokenType.FLOAT, - 136 TokenType.DOUBLE, - 137 TokenType.CHAR, - 138 TokenType.NCHAR, - 139 TokenType.VARCHAR, - 140 TokenType.NVARCHAR, - 141 TokenType.TEXT, - 142 TokenType.MEDIUMTEXT, - 143 TokenType.LONGTEXT, - 144 TokenType.MEDIUMBLOB, - 145 TokenType.LONGBLOB, - 146 TokenType.BINARY, - 147 TokenType.VARBINARY, - 148 TokenType.JSON, - 149 TokenType.JSONB, - 150 TokenType.INTERVAL, - 151 TokenType.TIME, - 152 TokenType.TIMESTAMP, - 153 TokenType.TIMESTAMPTZ, - 154 TokenType.TIMESTAMPLTZ, - 155 TokenType.DATETIME, - 156 TokenType.DATETIME64, - 157 TokenType.DATE, - 158 TokenType.INT4RANGE, - 159 
TokenType.INT4MULTIRANGE, - 160 TokenType.INT8RANGE, - 161 TokenType.INT8MULTIRANGE, - 162 TokenType.NUMRANGE, - 163 TokenType.NUMMULTIRANGE, - 164 TokenType.TSRANGE, - 165 TokenType.TSMULTIRANGE, - 166 TokenType.TSTZRANGE, - 167 TokenType.TSTZMULTIRANGE, - 168 TokenType.DATERANGE, - 169 TokenType.DATEMULTIRANGE, - 170 TokenType.DECIMAL, - 171 TokenType.BIGDECIMAL, - 172 TokenType.UUID, - 173 TokenType.GEOGRAPHY, - 174 TokenType.GEOMETRY, - 175 TokenType.HLLSKETCH, - 176 TokenType.HSTORE, - 177 TokenType.PSEUDO_TYPE, - 178 TokenType.SUPER, - 179 TokenType.SERIAL, - 180 TokenType.SMALLSERIAL, - 181 TokenType.BIGSERIAL, - 182 TokenType.XML, - 183 TokenType.UNIQUEIDENTIFIER, - 184 TokenType.MONEY, - 185 TokenType.SMALLMONEY, - 186 TokenType.ROWVERSION, - 187 TokenType.IMAGE, - 188 TokenType.VARIANT, - 189 TokenType.OBJECT, - 190 TokenType.INET, - 191 *NESTED_TYPE_TOKENS, - 192 } - 193 - 194 SUBQUERY_PREDICATES = { - 195 TokenType.ANY: exp.Any, - 196 TokenType.ALL: exp.All, - 197 TokenType.EXISTS: exp.Exists, - 198 TokenType.SOME: exp.Any, - 199 } - 200 - 201 RESERVED_KEYWORDS = {*Tokenizer.SINGLE_TOKENS.values(), TokenType.SELECT} + 56 return klass + 57 + 58 + 59class Parser(metaclass=_Parser): + 60 """ + 61 Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree. + 62 + 63 Args: + 64 error_level: The desired error level. + 65 Default: ErrorLevel.IMMEDIATE + 66 error_message_context: Determines the amount of context to capture from a + 67 query string when displaying the error message (in number of characters). + 68 Default: 100 + 69 max_errors: Maximum number of error messages to include in a raised ParseError. + 70 This is only relevant if error_level is ErrorLevel.RAISE. + 71 Default: 3 + 72 """ + 73 + 74 FUNCTIONS: t.Dict[str, t.Callable] = { + 75 **{name: f.from_arg_list for f in exp.ALL_FUNCTIONS for name in f.sql_names()}, + 76 "DATE_TO_DATE_STR": lambda args: exp.Cast( + 77 this=seq_get(args, 0), + 78 to=exp.DataType(this=exp.DataType.Type.TEXT), + 79 ), + 80 "GLOB": lambda args: exp.Glob(this=seq_get(args, 1), expression=seq_get(args, 0)), + 81 "LIKE": parse_like, + 82 "TIME_TO_TIME_STR": lambda args: exp.Cast( + 83 this=seq_get(args, 0), + 84 to=exp.DataType(this=exp.DataType.Type.TEXT), + 85 ), + 86 "TS_OR_DS_TO_DATE_STR": lambda args: exp.Substring( + 87 this=exp.Cast( + 88 this=seq_get(args, 0), + 89 to=exp.DataType(this=exp.DataType.Type.TEXT), + 90 ), + 91 start=exp.Literal.number(1), + 92 length=exp.Literal.number(10), + 93 ), + 94 "VAR_MAP": parse_var_map, + 95 } + 96 + 97 NO_PAREN_FUNCTIONS = { + 98 TokenType.CURRENT_DATE: exp.CurrentDate, + 99 TokenType.CURRENT_DATETIME: exp.CurrentDate, + 100 TokenType.CURRENT_TIME: exp.CurrentTime, + 101 TokenType.CURRENT_TIMESTAMP: exp.CurrentTimestamp, + 102 TokenType.CURRENT_USER: exp.CurrentUser, + 103 } + 104 + 105 NESTED_TYPE_TOKENS = { + 106 TokenType.ARRAY, + 107 TokenType.MAP, + 108 TokenType.NULLABLE, + 109 TokenType.STRUCT, + 110 } + 111 + 112 ENUM_TYPE_TOKENS = { + 113 TokenType.ENUM, + 114 } + 115 + 116 TYPE_TOKENS = { + 117 TokenType.BIT, + 118 TokenType.BOOLEAN, + 119 TokenType.TINYINT, + 120 TokenType.UTINYINT, + 121 TokenType.SMALLINT, + 122 TokenType.USMALLINT, + 123 TokenType.INT, + 124 TokenType.UINT, + 125 TokenType.BIGINT, + 126 TokenType.UBIGINT, + 127 TokenType.INT128, + 128 TokenType.UINT128, + 129 TokenType.INT256, + 130 TokenType.UINT256, + 131 TokenType.FLOAT, + 132 TokenType.DOUBLE, + 133 TokenType.CHAR, + 134 TokenType.NCHAR, + 135 TokenType.VARCHAR, + 136 
TokenType.NVARCHAR, + 137 TokenType.TEXT, + 138 TokenType.MEDIUMTEXT, + 139 TokenType.LONGTEXT, + 140 TokenType.MEDIUMBLOB, + 141 TokenType.LONGBLOB, + 142 TokenType.BINARY, + 143 TokenType.VARBINARY, + 144 TokenType.JSON, + 145 TokenType.JSONB, + 146 TokenType.INTERVAL, + 147 TokenType.TIME, + 148 TokenType.TIMESTAMP, + 149 TokenType.TIMESTAMPTZ, + 150 TokenType.TIMESTAMPLTZ, + 151 TokenType.DATETIME, + 152 TokenType.DATETIME64, + 153 TokenType.DATE, + 154 TokenType.INT4RANGE, + 155 TokenType.INT4MULTIRANGE, + 156 TokenType.INT8RANGE, + 157 TokenType.INT8MULTIRANGE, + 158 TokenType.NUMRANGE, + 159 TokenType.NUMMULTIRANGE, + 160 TokenType.TSRANGE, + 161 TokenType.TSMULTIRANGE, + 162 TokenType.TSTZRANGE, + 163 TokenType.TSTZMULTIRANGE, + 164 TokenType.DATERANGE, + 165 TokenType.DATEMULTIRANGE, + 166 TokenType.DECIMAL, + 167 TokenType.BIGDECIMAL, + 168 TokenType.UUID, + 169 TokenType.GEOGRAPHY, + 170 TokenType.GEOMETRY, + 171 TokenType.HLLSKETCH, + 172 TokenType.HSTORE, + 173 TokenType.PSEUDO_TYPE, + 174 TokenType.SUPER, + 175 TokenType.SERIAL, + 176 TokenType.SMALLSERIAL, + 177 TokenType.BIGSERIAL, + 178 TokenType.XML, + 179 TokenType.UNIQUEIDENTIFIER, + 180 TokenType.MONEY, + 181 TokenType.SMALLMONEY, + 182 TokenType.ROWVERSION, + 183 TokenType.IMAGE, + 184 TokenType.VARIANT, + 185 TokenType.OBJECT, + 186 TokenType.INET, + 187 TokenType.ENUM, + 188 *NESTED_TYPE_TOKENS, + 189 } + 190 + 191 SUBQUERY_PREDICATES = { + 192 TokenType.ANY: exp.Any, + 193 TokenType.ALL: exp.All, + 194 TokenType.EXISTS: exp.Exists, + 195 TokenType.SOME: exp.Any, + 196 } + 197 + 198 RESERVED_KEYWORDS = { + 199 *Tokenizer.SINGLE_TOKENS.values(), + 200 TokenType.SELECT, + 201 } 202 203 DB_CREATABLES = { 204 TokenType.DATABASE, @@ -310,2751 +310,2751 @@ 216 *DB_CREATABLES, 217 } 218 - 219 ID_VAR_TOKENS = { - 220 TokenType.VAR, - 221 TokenType.ANTI, - 222 TokenType.APPLY, - 223 TokenType.ASC, - 224 TokenType.AUTO_INCREMENT, - 225 TokenType.BEGIN, - 226 TokenType.CACHE, - 227 TokenType.COLLATE, - 228 TokenType.COMMAND, - 229 TokenType.COMMENT, - 230 TokenType.COMMIT, - 231 TokenType.CONSTRAINT, - 232 TokenType.DEFAULT, - 233 TokenType.DELETE, - 234 TokenType.DESC, - 235 TokenType.DESCRIBE, - 236 TokenType.DICTIONARY, - 237 TokenType.DIV, - 238 TokenType.END, - 239 TokenType.EXECUTE, - 240 TokenType.ESCAPE, - 241 TokenType.FALSE, - 242 TokenType.FIRST, - 243 TokenType.FILTER, - 244 TokenType.FORMAT, - 245 TokenType.FULL, - 246 TokenType.IF, - 247 TokenType.IS, - 248 TokenType.ISNULL, - 249 TokenType.INTERVAL, - 250 TokenType.KEEP, - 251 TokenType.LEFT, - 252 TokenType.LOAD, - 253 TokenType.MERGE, - 254 TokenType.NATURAL, - 255 TokenType.NEXT, - 256 TokenType.OFFSET, - 257 TokenType.ORDINALITY, - 258 TokenType.OVERWRITE, - 259 TokenType.PARTITION, - 260 TokenType.PERCENT, - 261 TokenType.PIVOT, - 262 TokenType.PRAGMA, - 263 TokenType.RANGE, - 264 TokenType.REFERENCES, - 265 TokenType.RIGHT, - 266 TokenType.ROW, - 267 TokenType.ROWS, - 268 TokenType.SEMI, - 269 TokenType.SET, - 270 TokenType.SETTINGS, - 271 TokenType.SHOW, - 272 TokenType.TEMPORARY, - 273 TokenType.TOP, - 274 TokenType.TRUE, - 275 TokenType.UNIQUE, - 276 TokenType.UNPIVOT, - 277 TokenType.VOLATILE, - 278 TokenType.WINDOW, - 279 *CREATABLES, - 280 *SUBQUERY_PREDICATES, - 281 *TYPE_TOKENS, - 282 *NO_PAREN_FUNCTIONS, - 283 } - 284 - 285 INTERVAL_VARS = ID_VAR_TOKENS - {TokenType.END} - 286 - 287 TABLE_ALIAS_TOKENS = ID_VAR_TOKENS - { - 288 TokenType.APPLY, - 289 TokenType.ASOF, - 290 TokenType.FULL, - 291 TokenType.LEFT, - 292 TokenType.LOCK, - 293 
TokenType.NATURAL, - 294 TokenType.OFFSET, - 295 TokenType.RIGHT, - 296 TokenType.WINDOW, - 297 } - 298 - 299 COMMENT_TABLE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.IS} - 300 - 301 UPDATE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.SET} - 302 - 303 TRIM_TYPES = {"LEADING", "TRAILING", "BOTH"} - 304 - 305 FUNC_TOKENS = { - 306 TokenType.COMMAND, - 307 TokenType.CURRENT_DATE, - 308 TokenType.CURRENT_DATETIME, - 309 TokenType.CURRENT_TIMESTAMP, - 310 TokenType.CURRENT_TIME, - 311 TokenType.CURRENT_USER, - 312 TokenType.FILTER, - 313 TokenType.FIRST, - 314 TokenType.FORMAT, - 315 TokenType.GLOB, - 316 TokenType.IDENTIFIER, - 317 TokenType.INDEX, - 318 TokenType.ISNULL, - 319 TokenType.ILIKE, - 320 TokenType.LIKE, - 321 TokenType.MERGE, - 322 TokenType.OFFSET, - 323 TokenType.PRIMARY_KEY, - 324 TokenType.RANGE, - 325 TokenType.REPLACE, - 326 TokenType.ROW, - 327 TokenType.UNNEST, - 328 TokenType.VAR, - 329 TokenType.LEFT, - 330 TokenType.RIGHT, - 331 TokenType.DATE, - 332 TokenType.DATETIME, - 333 TokenType.TABLE, - 334 TokenType.TIMESTAMP, - 335 TokenType.TIMESTAMPTZ, - 336 TokenType.WINDOW, - 337 *TYPE_TOKENS, - 338 *SUBQUERY_PREDICATES, - 339 } - 340 - 341 CONJUNCTION = { - 342 TokenType.AND: exp.And, - 343 TokenType.OR: exp.Or, - 344 } - 345 - 346 EQUALITY = { - 347 TokenType.EQ: exp.EQ, - 348 TokenType.NEQ: exp.NEQ, - 349 TokenType.NULLSAFE_EQ: exp.NullSafeEQ, - 350 } - 351 - 352 COMPARISON = { - 353 TokenType.GT: exp.GT, - 354 TokenType.GTE: exp.GTE, - 355 TokenType.LT: exp.LT, - 356 TokenType.LTE: exp.LTE, - 357 } - 358 - 359 BITWISE = { - 360 TokenType.AMP: exp.BitwiseAnd, - 361 TokenType.CARET: exp.BitwiseXor, - 362 TokenType.PIPE: exp.BitwiseOr, - 363 TokenType.DPIPE: exp.DPipe, - 364 } - 365 - 366 TERM = { - 367 TokenType.DASH: exp.Sub, - 368 TokenType.PLUS: exp.Add, - 369 TokenType.MOD: exp.Mod, - 370 TokenType.COLLATE: exp.Collate, - 371 } - 372 - 373 FACTOR = { - 374 TokenType.DIV: exp.IntDiv, - 375 TokenType.LR_ARROW: exp.Distance, - 376 TokenType.SLASH: exp.Div, - 377 TokenType.STAR: exp.Mul, - 378 } - 379 - 380 TIMESTAMPS = { - 381 TokenType.TIME, - 382 TokenType.TIMESTAMP, - 383 TokenType.TIMESTAMPTZ, - 384 TokenType.TIMESTAMPLTZ, - 385 } - 386 - 387 SET_OPERATIONS = { - 388 TokenType.UNION, - 389 TokenType.INTERSECT, - 390 TokenType.EXCEPT, - 391 } - 392 - 393 JOIN_METHODS = { - 394 TokenType.NATURAL, - 395 TokenType.ASOF, - 396 } - 397 - 398 JOIN_SIDES = { - 399 TokenType.LEFT, - 400 TokenType.RIGHT, - 401 TokenType.FULL, - 402 } - 403 - 404 JOIN_KINDS = { - 405 TokenType.INNER, - 406 TokenType.OUTER, - 407 TokenType.CROSS, - 408 TokenType.SEMI, - 409 TokenType.ANTI, - 410 } - 411 - 412 LAMBDAS = { - 413 TokenType.ARROW: lambda self, expressions: self.expression( - 414 exp.Lambda, - 415 this=self._replace_lambda( - 416 self._parse_conjunction(), - 417 {node.name for node in expressions}, - 418 ), - 419 expressions=expressions, - 420 ), - 421 TokenType.FARROW: lambda self, expressions: self.expression( - 422 exp.Kwarg, - 423 this=exp.Var(this=expressions[0].name), - 424 expression=self._parse_conjunction(), + 219 # Tokens that can represent identifiers + 220 ID_VAR_TOKENS = { + 221 TokenType.VAR, + 222 TokenType.ANTI, + 223 TokenType.APPLY, + 224 TokenType.ASC, + 225 TokenType.AUTO_INCREMENT, + 226 TokenType.BEGIN, + 227 TokenType.CACHE, + 228 TokenType.CASE, + 229 TokenType.COLLATE, + 230 TokenType.COMMAND, + 231 TokenType.COMMENT, + 232 TokenType.COMMIT, + 233 TokenType.CONSTRAINT, + 234 TokenType.DEFAULT, + 235 TokenType.DELETE, + 236 TokenType.DESC, + 237 
TokenType.DESCRIBE, + 238 TokenType.DICTIONARY, + 239 TokenType.DIV, + 240 TokenType.END, + 241 TokenType.EXECUTE, + 242 TokenType.ESCAPE, + 243 TokenType.FALSE, + 244 TokenType.FIRST, + 245 TokenType.FILTER, + 246 TokenType.FORMAT, + 247 TokenType.FULL, + 248 TokenType.IF, + 249 TokenType.IS, + 250 TokenType.ISNULL, + 251 TokenType.INTERVAL, + 252 TokenType.KEEP, + 253 TokenType.LEFT, + 254 TokenType.LOAD, + 255 TokenType.MERGE, + 256 TokenType.NATURAL, + 257 TokenType.NEXT, + 258 TokenType.OFFSET, + 259 TokenType.ORDINALITY, + 260 TokenType.OVERWRITE, + 261 TokenType.PARTITION, + 262 TokenType.PERCENT, + 263 TokenType.PIVOT, + 264 TokenType.PRAGMA, + 265 TokenType.RANGE, + 266 TokenType.REFERENCES, + 267 TokenType.RIGHT, + 268 TokenType.ROW, + 269 TokenType.ROWS, + 270 TokenType.SEMI, + 271 TokenType.SET, + 272 TokenType.SETTINGS, + 273 TokenType.SHOW, + 274 TokenType.TEMPORARY, + 275 TokenType.TOP, + 276 TokenType.TRUE, + 277 TokenType.UNIQUE, + 278 TokenType.UNPIVOT, + 279 TokenType.UPDATE, + 280 TokenType.VOLATILE, + 281 TokenType.WINDOW, + 282 *CREATABLES, + 283 *SUBQUERY_PREDICATES, + 284 *TYPE_TOKENS, + 285 *NO_PAREN_FUNCTIONS, + 286 } + 287 + 288 INTERVAL_VARS = ID_VAR_TOKENS - {TokenType.END} + 289 + 290 TABLE_ALIAS_TOKENS = ID_VAR_TOKENS - { + 291 TokenType.APPLY, + 292 TokenType.ASOF, + 293 TokenType.FULL, + 294 TokenType.LEFT, + 295 TokenType.LOCK, + 296 TokenType.NATURAL, + 297 TokenType.OFFSET, + 298 TokenType.RIGHT, + 299 TokenType.WINDOW, + 300 } + 301 + 302 COMMENT_TABLE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.IS} + 303 + 304 UPDATE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.SET} + 305 + 306 TRIM_TYPES = {"LEADING", "TRAILING", "BOTH"} + 307 + 308 FUNC_TOKENS = { + 309 TokenType.COMMAND, + 310 TokenType.CURRENT_DATE, + 311 TokenType.CURRENT_DATETIME, + 312 TokenType.CURRENT_TIMESTAMP, + 313 TokenType.CURRENT_TIME, + 314 TokenType.CURRENT_USER, + 315 TokenType.FILTER, + 316 TokenType.FIRST, + 317 TokenType.FORMAT, + 318 TokenType.GLOB, + 319 TokenType.IDENTIFIER, + 320 TokenType.INDEX, + 321 TokenType.ISNULL, + 322 TokenType.ILIKE, + 323 TokenType.LIKE, + 324 TokenType.MERGE, + 325 TokenType.OFFSET, + 326 TokenType.PRIMARY_KEY, + 327 TokenType.RANGE, + 328 TokenType.REPLACE, + 329 TokenType.ROW, + 330 TokenType.UNNEST, + 331 TokenType.VAR, + 332 TokenType.LEFT, + 333 TokenType.RIGHT, + 334 TokenType.DATE, + 335 TokenType.DATETIME, + 336 TokenType.TABLE, + 337 TokenType.TIMESTAMP, + 338 TokenType.TIMESTAMPTZ, + 339 TokenType.WINDOW, + 340 *TYPE_TOKENS, + 341 *SUBQUERY_PREDICATES, + 342 } + 343 + 344 CONJUNCTION = { + 345 TokenType.AND: exp.And, + 346 TokenType.OR: exp.Or, + 347 } + 348 + 349 EQUALITY = { + 350 TokenType.EQ: exp.EQ, + 351 TokenType.NEQ: exp.NEQ, + 352 TokenType.NULLSAFE_EQ: exp.NullSafeEQ, + 353 } + 354 + 355 COMPARISON = { + 356 TokenType.GT: exp.GT, + 357 TokenType.GTE: exp.GTE, + 358 TokenType.LT: exp.LT, + 359 TokenType.LTE: exp.LTE, + 360 } + 361 + 362 BITWISE = { + 363 TokenType.AMP: exp.BitwiseAnd, + 364 TokenType.CARET: exp.BitwiseXor, + 365 TokenType.PIPE: exp.BitwiseOr, + 366 TokenType.DPIPE: exp.DPipe, + 367 } + 368 + 369 TERM = { + 370 TokenType.DASH: exp.Sub, + 371 TokenType.PLUS: exp.Add, + 372 TokenType.MOD: exp.Mod, + 373 TokenType.COLLATE: exp.Collate, + 374 } + 375 + 376 FACTOR = { + 377 TokenType.DIV: exp.IntDiv, + 378 TokenType.LR_ARROW: exp.Distance, + 379 TokenType.SLASH: exp.Div, + 380 TokenType.STAR: exp.Mul, + 381 } + 382 + 383 TIMESTAMPS = { + 384 TokenType.TIME, + 385 TokenType.TIMESTAMP, + 386 TokenType.TIMESTAMPTZ, + 
387 TokenType.TIMESTAMPLTZ, + 388 } + 389 + 390 SET_OPERATIONS = { + 391 TokenType.UNION, + 392 TokenType.INTERSECT, + 393 TokenType.EXCEPT, + 394 } + 395 + 396 JOIN_METHODS = { + 397 TokenType.NATURAL, + 398 TokenType.ASOF, + 399 } + 400 + 401 JOIN_SIDES = { + 402 TokenType.LEFT, + 403 TokenType.RIGHT, + 404 TokenType.FULL, + 405 } + 406 + 407 JOIN_KINDS = { + 408 TokenType.INNER, + 409 TokenType.OUTER, + 410 TokenType.CROSS, + 411 TokenType.SEMI, + 412 TokenType.ANTI, + 413 } + 414 + 415 JOIN_HINTS: t.Set[str] = set() + 416 + 417 LAMBDAS = { + 418 TokenType.ARROW: lambda self, expressions: self.expression( + 419 exp.Lambda, + 420 this=self._replace_lambda( + 421 self._parse_conjunction(), + 422 {node.name for node in expressions}, + 423 ), + 424 expressions=expressions, 425 ), - 426 } - 427 - 428 COLUMN_OPERATORS = { - 429 TokenType.DOT: None, - 430 TokenType.DCOLON: lambda self, this, to: self.expression( - 431 exp.Cast if self.STRICT_CAST else exp.TryCast, - 432 this=this, - 433 to=to, - 434 ), - 435 TokenType.ARROW: lambda self, this, path: self.expression( - 436 exp.JSONExtract, + 426 TokenType.FARROW: lambda self, expressions: self.expression( + 427 exp.Kwarg, + 428 this=exp.var(expressions[0].name), + 429 expression=self._parse_conjunction(), + 430 ), + 431 } + 432 + 433 COLUMN_OPERATORS = { + 434 TokenType.DOT: None, + 435 TokenType.DCOLON: lambda self, this, to: self.expression( + 436 exp.Cast if self.STRICT_CAST else exp.TryCast, 437 this=this, - 438 expression=path, + 438 to=to, 439 ), - 440 TokenType.DARROW: lambda self, this, path: self.expression( - 441 exp.JSONExtractScalar, + 440 TokenType.ARROW: lambda self, this, path: self.expression( + 441 exp.JSONExtract, 442 this=this, 443 expression=path, 444 ), - 445 TokenType.HASH_ARROW: lambda self, this, path: self.expression( - 446 exp.JSONBExtract, + 445 TokenType.DARROW: lambda self, this, path: self.expression( + 446 exp.JSONExtractScalar, 447 this=this, 448 expression=path, 449 ), - 450 TokenType.DHASH_ARROW: lambda self, this, path: self.expression( - 451 exp.JSONBExtractScalar, + 450 TokenType.HASH_ARROW: lambda self, this, path: self.expression( + 451 exp.JSONBExtract, 452 this=this, 453 expression=path, 454 ), - 455 TokenType.PLACEHOLDER: lambda self, this, key: self.expression( - 456 exp.JSONBContains, + 455 TokenType.DHASH_ARROW: lambda self, this, path: self.expression( + 456 exp.JSONBExtractScalar, 457 this=this, - 458 expression=key, + 458 expression=path, 459 ), - 460 } - 461 - 462 EXPRESSION_PARSERS = { - 463 exp.Cluster: lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"), - 464 exp.Column: lambda self: self._parse_column(), - 465 exp.Condition: lambda self: self._parse_conjunction(), - 466 exp.DataType: lambda self: self._parse_types(), - 467 exp.Expression: lambda self: self._parse_statement(), - 468 exp.From: lambda self: self._parse_from(), - 469 exp.Group: lambda self: self._parse_group(), - 470 exp.Having: lambda self: self._parse_having(), - 471 exp.Identifier: lambda self: self._parse_id_var(), - 472 exp.Join: lambda self: self._parse_join(), - 473 exp.Lambda: lambda self: self._parse_lambda(), - 474 exp.Lateral: lambda self: self._parse_lateral(), - 475 exp.Limit: lambda self: self._parse_limit(), - 476 exp.Offset: lambda self: self._parse_offset(), - 477 exp.Order: lambda self: self._parse_order(), - 478 exp.Ordered: lambda self: self._parse_ordered(), - 479 exp.Properties: lambda self: self._parse_properties(), - 480 exp.Qualify: lambda self: self._parse_qualify(), - 481 exp.Returning: lambda 
self: self._parse_returning(), - 482 exp.Sort: lambda self: self._parse_sort(exp.Sort, "SORT", "BY"), - 483 exp.Table: lambda self: self._parse_table_parts(), - 484 exp.TableAlias: lambda self: self._parse_table_alias(), - 485 exp.Where: lambda self: self._parse_where(), - 486 exp.Window: lambda self: self._parse_named_window(), - 487 exp.With: lambda self: self._parse_with(), - 488 "JOIN_TYPE": lambda self: self._parse_join_parts(), - 489 } - 490 - 491 STATEMENT_PARSERS = { - 492 TokenType.ALTER: lambda self: self._parse_alter(), - 493 TokenType.BEGIN: lambda self: self._parse_transaction(), - 494 TokenType.CACHE: lambda self: self._parse_cache(), - 495 TokenType.COMMIT: lambda self: self._parse_commit_or_rollback(), - 496 TokenType.COMMENT: lambda self: self._parse_comment(), - 497 TokenType.CREATE: lambda self: self._parse_create(), - 498 TokenType.DELETE: lambda self: self._parse_delete(), - 499 TokenType.DESC: lambda self: self._parse_describe(), - 500 TokenType.DESCRIBE: lambda self: self._parse_describe(), - 501 TokenType.DROP: lambda self: self._parse_drop(), - 502 TokenType.END: lambda self: self._parse_commit_or_rollback(), - 503 TokenType.FROM: lambda self: exp.select("*").from_( - 504 t.cast(exp.From, self._parse_from(skip_from_token=True)) - 505 ), - 506 TokenType.INSERT: lambda self: self._parse_insert(), - 507 TokenType.LOAD: lambda self: self._parse_load(), - 508 TokenType.MERGE: lambda self: self._parse_merge(), - 509 TokenType.PIVOT: lambda self: self._parse_simplified_pivot(), - 510 TokenType.PRAGMA: lambda self: self.expression(exp.Pragma, this=self._parse_expression()), - 511 TokenType.ROLLBACK: lambda self: self._parse_commit_or_rollback(), - 512 TokenType.SET: lambda self: self._parse_set(), - 513 TokenType.UNCACHE: lambda self: self._parse_uncache(), - 514 TokenType.UPDATE: lambda self: self._parse_update(), - 515 TokenType.USE: lambda self: self.expression( - 516 exp.Use, - 517 kind=self._match_texts(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA")) - 518 and exp.Var(this=self._prev.text), - 519 this=self._parse_table(schema=False), - 520 ), - 521 } - 522 - 523 UNARY_PARSERS = { - 524 TokenType.PLUS: lambda self: self._parse_unary(), # Unary + is handled as a no-op - 525 TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()), - 526 TokenType.TILDA: lambda self: self.expression(exp.BitwiseNot, this=self._parse_unary()), - 527 TokenType.DASH: lambda self: self.expression(exp.Neg, this=self._parse_unary()), - 528 } - 529 - 530 PRIMARY_PARSERS = { - 531 TokenType.STRING: lambda self, token: self.expression( - 532 exp.Literal, this=token.text, is_string=True - 533 ), - 534 TokenType.NUMBER: lambda self, token: self.expression( - 535 exp.Literal, this=token.text, is_string=False - 536 ), - 537 TokenType.STAR: lambda self, _: self.expression( - 538 exp.Star, - 539 **{"except": self._parse_except(), "replace": self._parse_replace()}, - 540 ), - 541 TokenType.NULL: lambda self, _: self.expression(exp.Null), - 542 TokenType.TRUE: lambda self, _: self.expression(exp.Boolean, this=True), - 543 TokenType.FALSE: lambda self, _: self.expression(exp.Boolean, this=False), - 544 TokenType.BIT_STRING: lambda self, token: self.expression(exp.BitString, this=token.text), - 545 TokenType.HEX_STRING: lambda self, token: self.expression(exp.HexString, this=token.text), - 546 TokenType.BYTE_STRING: lambda self, token: self.expression(exp.ByteString, this=token.text), - 547 TokenType.INTRODUCER: lambda self, token: self._parse_introducer(token), - 548 
TokenType.NATIONAL_STRING: lambda self, token: self.expression( - 549 exp.National, this=token.text - 550 ), - 551 TokenType.RAW_STRING: lambda self, token: self.expression(exp.RawString, this=token.text), - 552 TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(), - 553 } - 554 - 555 PLACEHOLDER_PARSERS = { - 556 TokenType.PLACEHOLDER: lambda self: self.expression(exp.Placeholder), - 557 TokenType.PARAMETER: lambda self: self._parse_parameter(), - 558 TokenType.COLON: lambda self: self.expression(exp.Placeholder, this=self._prev.text) - 559 if self._match_set((TokenType.NUMBER, TokenType.VAR)) - 560 else None, - 561 } - 562 - 563 RANGE_PARSERS = { - 564 TokenType.BETWEEN: lambda self, this: self._parse_between(this), - 565 TokenType.GLOB: binary_range_parser(exp.Glob), - 566 TokenType.ILIKE: binary_range_parser(exp.ILike), - 567 TokenType.IN: lambda self, this: self._parse_in(this), - 568 TokenType.IRLIKE: binary_range_parser(exp.RegexpILike), - 569 TokenType.IS: lambda self, this: self._parse_is(this), - 570 TokenType.LIKE: binary_range_parser(exp.Like), - 571 TokenType.OVERLAPS: binary_range_parser(exp.Overlaps), - 572 TokenType.RLIKE: binary_range_parser(exp.RegexpLike), - 573 TokenType.SIMILAR_TO: binary_range_parser(exp.SimilarTo), - 574 } - 575 - 576 PROPERTY_PARSERS: t.Dict[str, t.Callable] = { - 577 "ALGORITHM": lambda self: self._parse_property_assignment(exp.AlgorithmProperty), - 578 "AUTO_INCREMENT": lambda self: self._parse_property_assignment(exp.AutoIncrementProperty), - 579 "BLOCKCOMPRESSION": lambda self: self._parse_blockcompression(), - 580 "CHARACTER SET": lambda self: self._parse_character_set(), - 581 "CHECKSUM": lambda self: self._parse_checksum(), - 582 "CLUSTER": lambda self: self._parse_cluster(), - 583 "COLLATE": lambda self: self._parse_property_assignment(exp.CollateProperty), - 584 "COMMENT": lambda self: self._parse_property_assignment(exp.SchemaCommentProperty), - 585 "DATABLOCKSIZE": lambda self, **kwargs: self._parse_datablocksize(**kwargs), - 586 "DEFINER": lambda self: self._parse_definer(), - 587 "DETERMINISTIC": lambda self: self.expression( - 588 exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE") - 589 ), - 590 "DISTKEY": lambda self: self._parse_distkey(), - 591 "DISTSTYLE": lambda self: self._parse_property_assignment(exp.DistStyleProperty), - 592 "ENGINE": lambda self: self._parse_property_assignment(exp.EngineProperty), - 593 "EXECUTE": lambda self: self._parse_property_assignment(exp.ExecuteAsProperty), - 594 "EXTERNAL": lambda self: self.expression(exp.ExternalProperty), - 595 "FALLBACK": lambda self, **kwargs: self._parse_fallback(**kwargs), - 596 "FORMAT": lambda self: self._parse_property_assignment(exp.FileFormatProperty), - 597 "FREESPACE": lambda self: self._parse_freespace(), - 598 "IMMUTABLE": lambda self: self.expression( - 599 exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE") - 600 ), - 601 "JOURNAL": lambda self, **kwargs: self._parse_journal(**kwargs), - 602 "LANGUAGE": lambda self: self._parse_property_assignment(exp.LanguageProperty), - 603 "LAYOUT": lambda self: self._parse_dict_property(this="LAYOUT"), - 604 "LIFETIME": lambda self: self._parse_dict_range(this="LIFETIME"), - 605 "LIKE": lambda self: self._parse_create_like(), - 606 "LOCATION": lambda self: self._parse_property_assignment(exp.LocationProperty), - 607 "LOCK": lambda self: self._parse_locking(), - 608 "LOCKING": lambda self: self._parse_locking(), - 609 "LOG": lambda self, **kwargs: self._parse_log(**kwargs), - 610 
"MATERIALIZED": lambda self: self.expression(exp.MaterializedProperty), - 611 "MERGEBLOCKRATIO": lambda self, **kwargs: self._parse_mergeblockratio(**kwargs), - 612 "MULTISET": lambda self: self.expression(exp.SetProperty, multi=True), - 613 "NO": lambda self: self._parse_no_property(), - 614 "ON": lambda self: self._parse_on_property(), - 615 "ORDER BY": lambda self: self._parse_order(skip_order_token=True), - 616 "PARTITION BY": lambda self: self._parse_partitioned_by(), - 617 "PARTITIONED BY": lambda self: self._parse_partitioned_by(), - 618 "PARTITIONED_BY": lambda self: self._parse_partitioned_by(), - 619 "PRIMARY KEY": lambda self: self._parse_primary_key(in_props=True), - 620 "RANGE": lambda self: self._parse_dict_range(this="RANGE"), - 621 "RETURNS": lambda self: self._parse_returns(), - 622 "ROW": lambda self: self._parse_row(), - 623 "ROW_FORMAT": lambda self: self._parse_property_assignment(exp.RowFormatProperty), - 624 "SET": lambda self: self.expression(exp.SetProperty, multi=False), - 625 "SETTINGS": lambda self: self.expression( - 626 exp.SettingsProperty, expressions=self._parse_csv(self._parse_set_item) - 627 ), - 628 "SORTKEY": lambda self: self._parse_sortkey(), - 629 "SOURCE": lambda self: self._parse_dict_property(this="SOURCE"), - 630 "STABLE": lambda self: self.expression( - 631 exp.StabilityProperty, this=exp.Literal.string("STABLE") + 460 TokenType.PLACEHOLDER: lambda self, this, key: self.expression( + 461 exp.JSONBContains, + 462 this=this, + 463 expression=key, + 464 ), + 465 } + 466 + 467 EXPRESSION_PARSERS = { + 468 exp.Cluster: lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"), + 469 exp.Column: lambda self: self._parse_column(), + 470 exp.Condition: lambda self: self._parse_conjunction(), + 471 exp.DataType: lambda self: self._parse_types(), + 472 exp.Expression: lambda self: self._parse_statement(), + 473 exp.From: lambda self: self._parse_from(), + 474 exp.Group: lambda self: self._parse_group(), + 475 exp.Having: lambda self: self._parse_having(), + 476 exp.Identifier: lambda self: self._parse_id_var(), + 477 exp.Join: lambda self: self._parse_join(), + 478 exp.Lambda: lambda self: self._parse_lambda(), + 479 exp.Lateral: lambda self: self._parse_lateral(), + 480 exp.Limit: lambda self: self._parse_limit(), + 481 exp.Offset: lambda self: self._parse_offset(), + 482 exp.Order: lambda self: self._parse_order(), + 483 exp.Ordered: lambda self: self._parse_ordered(), + 484 exp.Properties: lambda self: self._parse_properties(), + 485 exp.Qualify: lambda self: self._parse_qualify(), + 486 exp.Returning: lambda self: self._parse_returning(), + 487 exp.Sort: lambda self: self._parse_sort(exp.Sort, "SORT", "BY"), + 488 exp.Table: lambda self: self._parse_table_parts(), + 489 exp.TableAlias: lambda self: self._parse_table_alias(), + 490 exp.Where: lambda self: self._parse_where(), + 491 exp.Window: lambda self: self._parse_named_window(), + 492 exp.With: lambda self: self._parse_with(), + 493 "JOIN_TYPE": lambda self: self._parse_join_parts(), + 494 } + 495 + 496 STATEMENT_PARSERS = { + 497 TokenType.ALTER: lambda self: self._parse_alter(), + 498 TokenType.BEGIN: lambda self: self._parse_transaction(), + 499 TokenType.CACHE: lambda self: self._parse_cache(), + 500 TokenType.COMMIT: lambda self: self._parse_commit_or_rollback(), + 501 TokenType.COMMENT: lambda self: self._parse_comment(), + 502 TokenType.CREATE: lambda self: self._parse_create(), + 503 TokenType.DELETE: lambda self: self._parse_delete(), + 504 TokenType.DESC: lambda self: 
self._parse_describe(), + 505 TokenType.DESCRIBE: lambda self: self._parse_describe(), + 506 TokenType.DROP: lambda self: self._parse_drop(), + 507 TokenType.END: lambda self: self._parse_commit_or_rollback(), + 508 TokenType.FROM: lambda self: exp.select("*").from_( + 509 t.cast(exp.From, self._parse_from(skip_from_token=True)) + 510 ), + 511 TokenType.INSERT: lambda self: self._parse_insert(), + 512 TokenType.LOAD: lambda self: self._parse_load(), + 513 TokenType.MERGE: lambda self: self._parse_merge(), + 514 TokenType.PIVOT: lambda self: self._parse_simplified_pivot(), + 515 TokenType.PRAGMA: lambda self: self.expression(exp.Pragma, this=self._parse_expression()), + 516 TokenType.ROLLBACK: lambda self: self._parse_commit_or_rollback(), + 517 TokenType.SET: lambda self: self._parse_set(), + 518 TokenType.UNCACHE: lambda self: self._parse_uncache(), + 519 TokenType.UPDATE: lambda self: self._parse_update(), + 520 TokenType.USE: lambda self: self.expression( + 521 exp.Use, + 522 kind=self._match_texts(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA")) + 523 and exp.var(self._prev.text), + 524 this=self._parse_table(schema=False), + 525 ), + 526 } + 527 + 528 UNARY_PARSERS = { + 529 TokenType.PLUS: lambda self: self._parse_unary(), # Unary + is handled as a no-op + 530 TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()), + 531 TokenType.TILDA: lambda self: self.expression(exp.BitwiseNot, this=self._parse_unary()), + 532 TokenType.DASH: lambda self: self.expression(exp.Neg, this=self._parse_unary()), + 533 } + 534 + 535 PRIMARY_PARSERS = { + 536 TokenType.STRING: lambda self, token: self.expression( + 537 exp.Literal, this=token.text, is_string=True + 538 ), + 539 TokenType.NUMBER: lambda self, token: self.expression( + 540 exp.Literal, this=token.text, is_string=False + 541 ), + 542 TokenType.STAR: lambda self, _: self.expression( + 543 exp.Star, + 544 **{"except": self._parse_except(), "replace": self._parse_replace()}, + 545 ), + 546 TokenType.NULL: lambda self, _: self.expression(exp.Null), + 547 TokenType.TRUE: lambda self, _: self.expression(exp.Boolean, this=True), + 548 TokenType.FALSE: lambda self, _: self.expression(exp.Boolean, this=False), + 549 TokenType.BIT_STRING: lambda self, token: self.expression(exp.BitString, this=token.text), + 550 TokenType.HEX_STRING: lambda self, token: self.expression(exp.HexString, this=token.text), + 551 TokenType.BYTE_STRING: lambda self, token: self.expression(exp.ByteString, this=token.text), + 552 TokenType.INTRODUCER: lambda self, token: self._parse_introducer(token), + 553 TokenType.NATIONAL_STRING: lambda self, token: self.expression( + 554 exp.National, this=token.text + 555 ), + 556 TokenType.RAW_STRING: lambda self, token: self.expression(exp.RawString, this=token.text), + 557 TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(), + 558 } + 559 + 560 PLACEHOLDER_PARSERS = { + 561 TokenType.PLACEHOLDER: lambda self: self.expression(exp.Placeholder), + 562 TokenType.PARAMETER: lambda self: self._parse_parameter(), + 563 TokenType.COLON: lambda self: self.expression(exp.Placeholder, this=self._prev.text) + 564 if self._match_set((TokenType.NUMBER, TokenType.VAR)) + 565 else None, + 566 } + 567 + 568 RANGE_PARSERS = { + 569 TokenType.BETWEEN: lambda self, this: self._parse_between(this), + 570 TokenType.GLOB: binary_range_parser(exp.Glob), + 571 TokenType.ILIKE: binary_range_parser(exp.ILike), + 572 TokenType.IN: lambda self, this: self._parse_in(this), + 573 TokenType.IRLIKE: 
binary_range_parser(exp.RegexpILike), + 574 TokenType.IS: lambda self, this: self._parse_is(this), + 575 TokenType.LIKE: binary_range_parser(exp.Like), + 576 TokenType.OVERLAPS: binary_range_parser(exp.Overlaps), + 577 TokenType.RLIKE: binary_range_parser(exp.RegexpLike), + 578 TokenType.SIMILAR_TO: binary_range_parser(exp.SimilarTo), + 579 } + 580 + 581 PROPERTY_PARSERS: t.Dict[str, t.Callable] = { + 582 "ALGORITHM": lambda self: self._parse_property_assignment(exp.AlgorithmProperty), + 583 "AUTO_INCREMENT": lambda self: self._parse_property_assignment(exp.AutoIncrementProperty), + 584 "BLOCKCOMPRESSION": lambda self: self._parse_blockcompression(), + 585 "CHARACTER SET": lambda self: self._parse_character_set(), + 586 "CHECKSUM": lambda self: self._parse_checksum(), + 587 "CLUSTER": lambda self: self._parse_cluster(), + 588 "COLLATE": lambda self: self._parse_property_assignment(exp.CollateProperty), + 589 "COMMENT": lambda self: self._parse_property_assignment(exp.SchemaCommentProperty), + 590 "DATABLOCKSIZE": lambda self, **kwargs: self._parse_datablocksize(**kwargs), + 591 "DEFINER": lambda self: self._parse_definer(), + 592 "DETERMINISTIC": lambda self: self.expression( + 593 exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE") + 594 ), + 595 "DISTKEY": lambda self: self._parse_distkey(), + 596 "DISTSTYLE": lambda self: self._parse_property_assignment(exp.DistStyleProperty), + 597 "ENGINE": lambda self: self._parse_property_assignment(exp.EngineProperty), + 598 "EXECUTE": lambda self: self._parse_property_assignment(exp.ExecuteAsProperty), + 599 "EXTERNAL": lambda self: self.expression(exp.ExternalProperty), + 600 "FALLBACK": lambda self, **kwargs: self._parse_fallback(**kwargs), + 601 "FORMAT": lambda self: self._parse_property_assignment(exp.FileFormatProperty), + 602 "FREESPACE": lambda self: self._parse_freespace(), + 603 "IMMUTABLE": lambda self: self.expression( + 604 exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE") + 605 ), + 606 "JOURNAL": lambda self, **kwargs: self._parse_journal(**kwargs), + 607 "LANGUAGE": lambda self: self._parse_property_assignment(exp.LanguageProperty), + 608 "LAYOUT": lambda self: self._parse_dict_property(this="LAYOUT"), + 609 "LIFETIME": lambda self: self._parse_dict_range(this="LIFETIME"), + 610 "LIKE": lambda self: self._parse_create_like(), + 611 "LOCATION": lambda self: self._parse_property_assignment(exp.LocationProperty), + 612 "LOCK": lambda self: self._parse_locking(), + 613 "LOCKING": lambda self: self._parse_locking(), + 614 "LOG": lambda self, **kwargs: self._parse_log(**kwargs), + 615 "MATERIALIZED": lambda self: self.expression(exp.MaterializedProperty), + 616 "MERGEBLOCKRATIO": lambda self, **kwargs: self._parse_mergeblockratio(**kwargs), + 617 "MULTISET": lambda self: self.expression(exp.SetProperty, multi=True), + 618 "NO": lambda self: self._parse_no_property(), + 619 "ON": lambda self: self._parse_on_property(), + 620 "ORDER BY": lambda self: self._parse_order(skip_order_token=True), + 621 "PARTITION BY": lambda self: self._parse_partitioned_by(), + 622 "PARTITIONED BY": lambda self: self._parse_partitioned_by(), + 623 "PARTITIONED_BY": lambda self: self._parse_partitioned_by(), + 624 "PRIMARY KEY": lambda self: self._parse_primary_key(in_props=True), + 625 "RANGE": lambda self: self._parse_dict_range(this="RANGE"), + 626 "RETURNS": lambda self: self._parse_returns(), + 627 "ROW": lambda self: self._parse_row(), + 628 "ROW_FORMAT": lambda self: self._parse_property_assignment(exp.RowFormatProperty), + 629 "SET": 
lambda self: self.expression(exp.SetProperty, multi=False), + 630 "SETTINGS": lambda self: self.expression( + 631 exp.SettingsProperty, expressions=self._parse_csv(self._parse_set_item) 632 ), - 633 "STORED": lambda self: self._parse_stored(), - 634 "TBLPROPERTIES": lambda self: self._parse_wrapped_csv(self._parse_property), - 635 "TEMP": lambda self: self.expression(exp.TemporaryProperty), - 636 "TEMPORARY": lambda self: self.expression(exp.TemporaryProperty), - 637 "TRANSIENT": lambda self: self.expression(exp.TransientProperty), - 638 "TTL": lambda self: self._parse_ttl(), - 639 "USING": lambda self: self._parse_property_assignment(exp.FileFormatProperty), - 640 "VOLATILE": lambda self: self._parse_volatile_property(), - 641 "WITH": lambda self: self._parse_with_property(), - 642 } - 643 - 644 CONSTRAINT_PARSERS = { - 645 "AUTOINCREMENT": lambda self: self._parse_auto_increment(), - 646 "AUTO_INCREMENT": lambda self: self._parse_auto_increment(), - 647 "CASESPECIFIC": lambda self: self.expression(exp.CaseSpecificColumnConstraint, not_=False), - 648 "CHARACTER SET": lambda self: self.expression( - 649 exp.CharacterSetColumnConstraint, this=self._parse_var_or_string() - 650 ), - 651 "CHECK": lambda self: self.expression( - 652 exp.CheckColumnConstraint, this=self._parse_wrapped(self._parse_conjunction) - 653 ), - 654 "COLLATE": lambda self: self.expression( - 655 exp.CollateColumnConstraint, this=self._parse_var() + 633 "SORTKEY": lambda self: self._parse_sortkey(), + 634 "SOURCE": lambda self: self._parse_dict_property(this="SOURCE"), + 635 "STABLE": lambda self: self.expression( + 636 exp.StabilityProperty, this=exp.Literal.string("STABLE") + 637 ), + 638 "STORED": lambda self: self._parse_stored(), + 639 "TBLPROPERTIES": lambda self: self._parse_wrapped_csv(self._parse_property), + 640 "TEMP": lambda self: self.expression(exp.TemporaryProperty), + 641 "TEMPORARY": lambda self: self.expression(exp.TemporaryProperty), + 642 "TO": lambda self: self._parse_to_table(), + 643 "TRANSIENT": lambda self: self.expression(exp.TransientProperty), + 644 "TTL": lambda self: self._parse_ttl(), + 645 "USING": lambda self: self._parse_property_assignment(exp.FileFormatProperty), + 646 "VOLATILE": lambda self: self._parse_volatile_property(), + 647 "WITH": lambda self: self._parse_with_property(), + 648 } + 649 + 650 CONSTRAINT_PARSERS = { + 651 "AUTOINCREMENT": lambda self: self._parse_auto_increment(), + 652 "AUTO_INCREMENT": lambda self: self._parse_auto_increment(), + 653 "CASESPECIFIC": lambda self: self.expression(exp.CaseSpecificColumnConstraint, not_=False), + 654 "CHARACTER SET": lambda self: self.expression( + 655 exp.CharacterSetColumnConstraint, this=self._parse_var_or_string() 656 ), - 657 "COMMENT": lambda self: self.expression( - 658 exp.CommentColumnConstraint, this=self._parse_string() + 657 "CHECK": lambda self: self.expression( + 658 exp.CheckColumnConstraint, this=self._parse_wrapped(self._parse_conjunction) 659 ), - 660 "COMPRESS": lambda self: self._parse_compress(), - 661 "DEFAULT": lambda self: self.expression( - 662 exp.DefaultColumnConstraint, this=self._parse_bitwise() - 663 ), - 664 "ENCODE": lambda self: self.expression(exp.EncodeColumnConstraint, this=self._parse_var()), - 665 "FOREIGN KEY": lambda self: self._parse_foreign_key(), - 666 "FORMAT": lambda self: self.expression( - 667 exp.DateFormatColumnConstraint, this=self._parse_var_or_string() - 668 ), - 669 "GENERATED": lambda self: self._parse_generated_as_identity(), - 670 "IDENTITY": lambda self: 
self._parse_auto_increment(), - 671 "INLINE": lambda self: self._parse_inline(), - 672 "LIKE": lambda self: self._parse_create_like(), - 673 "NOT": lambda self: self._parse_not_constraint(), - 674 "NULL": lambda self: self.expression(exp.NotNullColumnConstraint, allow_null=True), - 675 "ON": lambda self: self._match(TokenType.UPDATE) - 676 and self.expression(exp.OnUpdateColumnConstraint, this=self._parse_function()), - 677 "PATH": lambda self: self.expression(exp.PathColumnConstraint, this=self._parse_string()), - 678 "PRIMARY KEY": lambda self: self._parse_primary_key(), - 679 "REFERENCES": lambda self: self._parse_references(match=False), - 680 "TITLE": lambda self: self.expression( - 681 exp.TitleColumnConstraint, this=self._parse_var_or_string() - 682 ), - 683 "TTL": lambda self: self.expression(exp.MergeTreeTTL, expressions=[self._parse_bitwise()]), - 684 "UNIQUE": lambda self: self._parse_unique(), - 685 "UPPERCASE": lambda self: self.expression(exp.UppercaseColumnConstraint), - 686 } - 687 - 688 ALTER_PARSERS = { - 689 "ADD": lambda self: self._parse_alter_table_add(), - 690 "ALTER": lambda self: self._parse_alter_table_alter(), - 691 "DELETE": lambda self: self.expression(exp.Delete, where=self._parse_where()), - 692 "DROP": lambda self: self._parse_alter_table_drop(), - 693 "RENAME": lambda self: self._parse_alter_table_rename(), - 694 } - 695 - 696 SCHEMA_UNNAMED_CONSTRAINTS = {"CHECK", "FOREIGN KEY", "LIKE", "PRIMARY KEY", "UNIQUE"} - 697 - 698 NO_PAREN_FUNCTION_PARSERS = { - 699 TokenType.ANY: lambda self: self.expression(exp.Any, this=self._parse_bitwise()), - 700 TokenType.CASE: lambda self: self._parse_case(), - 701 TokenType.IF: lambda self: self._parse_if(), - 702 TokenType.NEXT_VALUE_FOR: lambda self: self.expression( - 703 exp.NextValueFor, - 704 this=self._parse_column(), - 705 order=self._match(TokenType.OVER) and self._parse_wrapped(self._parse_order), - 706 ), - 707 } - 708 - 709 FUNCTIONS_WITH_ALIASED_ARGS = {"STRUCT"} - 710 - 711 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { - 712 "CAST": lambda self: self._parse_cast(self.STRICT_CAST), - 713 "CONVERT": lambda self: self._parse_convert(self.STRICT_CAST), - 714 "DECODE": lambda self: self._parse_decode(), - 715 "EXTRACT": lambda self: self._parse_extract(), - 716 "JSON_OBJECT": lambda self: self._parse_json_object(), - 717 "LOG": lambda self: self._parse_logarithm(), - 718 "MATCH": lambda self: self._parse_match_against(), - 719 "OPENJSON": lambda self: self._parse_open_json(), - 720 "POSITION": lambda self: self._parse_position(), - 721 "SAFE_CAST": lambda self: self._parse_cast(False), - 722 "STRING_AGG": lambda self: self._parse_string_agg(), - 723 "SUBSTRING": lambda self: self._parse_substring(), - 724 "TRIM": lambda self: self._parse_trim(), - 725 "TRY_CAST": lambda self: self._parse_cast(False), - 726 "TRY_CONVERT": lambda self: self._parse_convert(False), - 727 } - 728 - 729 QUERY_MODIFIER_PARSERS = { - 730 "joins": lambda self: list(iter(self._parse_join, None)), - 731 "laterals": lambda self: list(iter(self._parse_lateral, None)), - 732 "match": lambda self: self._parse_match_recognize(), - 733 "where": lambda self: self._parse_where(), - 734 "group": lambda self: self._parse_group(), - 735 "having": lambda self: self._parse_having(), - 736 "qualify": lambda self: self._parse_qualify(), - 737 "windows": lambda self: self._parse_window_clause(), - 738 "order": lambda self: self._parse_order(), - 739 "limit": lambda self: self._parse_limit(), - 740 "offset": lambda self: self._parse_offset(), - 741 "locks": 
lambda self: self._parse_locks(), - 742 "sample": lambda self: self._parse_table_sample(as_modifier=True), - 743 } - 744 - 745 SET_PARSERS = { - 746 "GLOBAL": lambda self: self._parse_set_item_assignment("GLOBAL"), - 747 "LOCAL": lambda self: self._parse_set_item_assignment("LOCAL"), - 748 "SESSION": lambda self: self._parse_set_item_assignment("SESSION"), - 749 "TRANSACTION": lambda self: self._parse_set_transaction(), + 660 "COLLATE": lambda self: self.expression( + 661 exp.CollateColumnConstraint, this=self._parse_var() + 662 ), + 663 "COMMENT": lambda self: self.expression( + 664 exp.CommentColumnConstraint, this=self._parse_string() + 665 ), + 666 "COMPRESS": lambda self: self._parse_compress(), + 667 "DEFAULT": lambda self: self.expression( + 668 exp.DefaultColumnConstraint, this=self._parse_bitwise() + 669 ), + 670 "ENCODE": lambda self: self.expression(exp.EncodeColumnConstraint, this=self._parse_var()), + 671 "FOREIGN KEY": lambda self: self._parse_foreign_key(), + 672 "FORMAT": lambda self: self.expression( + 673 exp.DateFormatColumnConstraint, this=self._parse_var_or_string() + 674 ), + 675 "GENERATED": lambda self: self._parse_generated_as_identity(), + 676 "IDENTITY": lambda self: self._parse_auto_increment(), + 677 "INLINE": lambda self: self._parse_inline(), + 678 "LIKE": lambda self: self._parse_create_like(), + 679 "NOT": lambda self: self._parse_not_constraint(), + 680 "NULL": lambda self: self.expression(exp.NotNullColumnConstraint, allow_null=True), + 681 "ON": lambda self: self._match(TokenType.UPDATE) + 682 and self.expression(exp.OnUpdateColumnConstraint, this=self._parse_function()), + 683 "PATH": lambda self: self.expression(exp.PathColumnConstraint, this=self._parse_string()), + 684 "PRIMARY KEY": lambda self: self._parse_primary_key(), + 685 "REFERENCES": lambda self: self._parse_references(match=False), + 686 "TITLE": lambda self: self.expression( + 687 exp.TitleColumnConstraint, this=self._parse_var_or_string() + 688 ), + 689 "TTL": lambda self: self.expression(exp.MergeTreeTTL, expressions=[self._parse_bitwise()]), + 690 "UNIQUE": lambda self: self._parse_unique(), + 691 "UPPERCASE": lambda self: self.expression(exp.UppercaseColumnConstraint), + 692 } + 693 + 694 ALTER_PARSERS = { + 695 "ADD": lambda self: self._parse_alter_table_add(), + 696 "ALTER": lambda self: self._parse_alter_table_alter(), + 697 "DELETE": lambda self: self.expression(exp.Delete, where=self._parse_where()), + 698 "DROP": lambda self: self._parse_alter_table_drop(), + 699 "RENAME": lambda self: self._parse_alter_table_rename(), + 700 } + 701 + 702 SCHEMA_UNNAMED_CONSTRAINTS = {"CHECK", "FOREIGN KEY", "LIKE", "PRIMARY KEY", "UNIQUE"} + 703 + 704 NO_PAREN_FUNCTION_PARSERS = { + 705 TokenType.ANY: lambda self: self.expression(exp.Any, this=self._parse_bitwise()), + 706 TokenType.CASE: lambda self: self._parse_case(), + 707 TokenType.IF: lambda self: self._parse_if(), + 708 TokenType.NEXT_VALUE_FOR: lambda self: self.expression( + 709 exp.NextValueFor, + 710 this=self._parse_column(), + 711 order=self._match(TokenType.OVER) and self._parse_wrapped(self._parse_order), + 712 ), + 713 } + 714 + 715 FUNCTIONS_WITH_ALIASED_ARGS = {"STRUCT"} + 716 + 717 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { + 718 "CAST": lambda self: self._parse_cast(self.STRICT_CAST), + 719 "CONCAT": lambda self: self._parse_concat(), + 720 "CONVERT": lambda self: self._parse_convert(self.STRICT_CAST), + 721 "DECODE": lambda self: self._parse_decode(), + 722 "EXTRACT": lambda self: self._parse_extract(), + 723 
"JSON_OBJECT": lambda self: self._parse_json_object(), + 724 "LOG": lambda self: self._parse_logarithm(), + 725 "MATCH": lambda self: self._parse_match_against(), + 726 "OPENJSON": lambda self: self._parse_open_json(), + 727 "POSITION": lambda self: self._parse_position(), + 728 "SAFE_CAST": lambda self: self._parse_cast(False), + 729 "STRING_AGG": lambda self: self._parse_string_agg(), + 730 "SUBSTRING": lambda self: self._parse_substring(), + 731 "TRIM": lambda self: self._parse_trim(), + 732 "TRY_CAST": lambda self: self._parse_cast(False), + 733 "TRY_CONVERT": lambda self: self._parse_convert(False), + 734 } + 735 + 736 QUERY_MODIFIER_PARSERS = { + 737 "joins": lambda self: list(iter(self._parse_join, None)), + 738 "laterals": lambda self: list(iter(self._parse_lateral, None)), + 739 "match": lambda self: self._parse_match_recognize(), + 740 "where": lambda self: self._parse_where(), + 741 "group": lambda self: self._parse_group(), + 742 "having": lambda self: self._parse_having(), + 743 "qualify": lambda self: self._parse_qualify(), + 744 "windows": lambda self: self._parse_window_clause(), + 745 "order": lambda self: self._parse_order(), + 746 "limit": lambda self: self._parse_limit(), + 747 "offset": lambda self: self._parse_offset(), + 748 "locks": lambda self: self._parse_locks(), + 749 "sample": lambda self: self._parse_table_sample(as_modifier=True), 750 } 751 - 752 SHOW_PARSERS: t.Dict[str, t.Callable] = {} - 753 - 754 TYPE_LITERAL_PARSERS: t.Dict[exp.DataType.Type, t.Callable] = {} - 755 - 756 MODIFIABLES = (exp.Subquery, exp.Subqueryable, exp.Table) - 757 - 758 TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"} - 759 - 760 TRANSACTION_CHARACTERISTICS = { - 761 "ISOLATION LEVEL REPEATABLE READ", - 762 "ISOLATION LEVEL READ COMMITTED", - 763 "ISOLATION LEVEL READ UNCOMMITTED", - 764 "ISOLATION LEVEL SERIALIZABLE", - 765 "READ WRITE", - 766 "READ ONLY", - 767 } + 752 SET_PARSERS = { + 753 "GLOBAL": lambda self: self._parse_set_item_assignment("GLOBAL"), + 754 "LOCAL": lambda self: self._parse_set_item_assignment("LOCAL"), + 755 "SESSION": lambda self: self._parse_set_item_assignment("SESSION"), + 756 "TRANSACTION": lambda self: self._parse_set_transaction(), + 757 } + 758 + 759 SHOW_PARSERS: t.Dict[str, t.Callable] = {} + 760 + 761 TYPE_LITERAL_PARSERS: t.Dict[exp.DataType.Type, t.Callable] = {} + 762 + 763 MODIFIABLES = (exp.Subquery, exp.Subqueryable, exp.Table) + 764 + 765 DDL_SELECT_TOKENS = {TokenType.SELECT, TokenType.WITH, TokenType.L_PAREN} + 766 + 767 PRE_VOLATILE_TOKENS = {TokenType.CREATE, TokenType.REPLACE, TokenType.UNIQUE} 768 - 769 INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"} - 770 - 771 CLONE_KINDS = {"TIMESTAMP", "OFFSET", "STATEMENT"} - 772 - 773 WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS} - 774 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER} - 775 WINDOW_SIDES = {"FOLLOWING", "PRECEDING"} - 776 - 777 ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY} + 769 TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"} + 770 TRANSACTION_CHARACTERISTICS = { + 771 "ISOLATION LEVEL REPEATABLE READ", + 772 "ISOLATION LEVEL READ COMMITTED", + 773 "ISOLATION LEVEL READ UNCOMMITTED", + 774 "ISOLATION LEVEL SERIALIZABLE", + 775 "READ WRITE", + 776 "READ ONLY", + 777 } 778 - 779 STRICT_CAST = True + 779 INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"} 780 - 781 CONVERT_TYPE_FIRST = False + 781 CLONE_KINDS = {"TIMESTAMP", "OFFSET", "STATEMENT"} 782 - 783 
PREFIXED_PIVOT_COLUMNS = False - 784 IDENTIFY_PIVOT_STRINGS = False - 785 - 786 LOG_BASE_FIRST = True - 787 LOG_DEFAULTS_TO_LN = False + 783 WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS} + 784 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER} + 785 WINDOW_SIDES = {"FOLLOWING", "PRECEDING"} + 786 + 787 ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY} 788 - 789 __slots__ = ( - 790 "error_level", - 791 "error_message_context", - 792 "sql", - 793 "errors", - 794 "index_offset", - 795 "unnest_column_only", - 796 "alias_post_tablesample", - 797 "max_errors", - 798 "null_ordering", - 799 "_tokens", - 800 "_index", - 801 "_curr", - 802 "_next", - 803 "_prev", - 804 "_prev_comments", - 805 "_show_trie", - 806 "_set_trie", - 807 ) - 808 - 809 def __init__( - 810 self, - 811 error_level: t.Optional[ErrorLevel] = None, - 812 error_message_context: int = 100, - 813 index_offset: int = 0, - 814 unnest_column_only: bool = False, - 815 alias_post_tablesample: bool = False, - 816 max_errors: int = 3, - 817 null_ordering: t.Optional[str] = None, - 818 ): - 819 self.error_level = error_level or ErrorLevel.IMMEDIATE - 820 self.error_message_context = error_message_context - 821 self.index_offset = index_offset - 822 self.unnest_column_only = unnest_column_only - 823 self.alias_post_tablesample = alias_post_tablesample - 824 self.max_errors = max_errors - 825 self.null_ordering = null_ordering - 826 self.reset() + 789 STRICT_CAST = True + 790 + 791 CONCAT_NULL_OUTPUTS_STRING = False # A NULL arg in CONCAT yields NULL by default + 792 + 793 CONVERT_TYPE_FIRST = False + 794 + 795 PREFIXED_PIVOT_COLUMNS = False + 796 IDENTIFY_PIVOT_STRINGS = False + 797 + 798 LOG_BASE_FIRST = True + 799 LOG_DEFAULTS_TO_LN = False + 800 + 801 __slots__ = ( + 802 "error_level", + 803 "error_message_context", + 804 "max_errors", + 805 "sql", + 806 "errors", + 807 "_tokens", + 808 "_index", + 809 "_curr", + 810 "_next", + 811 "_prev", + 812 "_prev_comments", + 813 ) + 814 + 815 # Autofilled + 816 INDEX_OFFSET: int = 0 + 817 UNNEST_COLUMN_ONLY: bool = False + 818 ALIAS_POST_TABLESAMPLE: bool = False + 819 STRICT_STRING_CONCAT = False + 820 NULL_ORDERING: str = "nulls_are_small" + 821 SHOW_TRIE: t.Dict = {} + 822 SET_TRIE: t.Dict = {} + 823 FORMAT_MAPPING: t.Dict[str, str] = {} + 824 FORMAT_TRIE: t.Dict = {} + 825 TIME_MAPPING: t.Dict[str, str] = {} + 826 TIME_TRIE: t.Dict = {} 827 - 828 def reset(self): - 829 self.sql = "" - 830 self.errors = [] - 831 self._tokens = [] - 832 self._index = 0 - 833 self._curr = None - 834 self._next = None - 835 self._prev = None - 836 self._prev_comments = None - 837 - 838 def parse( - 839 self, raw_tokens: t.List[Token], sql: t.Optional[str] = None - 840 ) -> t.List[t.Optional[exp.Expression]]: - 841 """ - 842 Parses a list of tokens and returns a list of syntax trees, one tree - 843 per parsed SQL statement. - 844 - 845 Args: - 846 raw_tokens: the list of tokens. - 847 sql: the original SQL string, used to produce helpful debug messages. 
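Context for the hunk above: the per-instance parser options (index_offset, unnest_column_only, alias_post_tablesample, null_ordering) move off the constructor and become "Autofilled" class-level attributes, leaving __init__ with only the error-handling knobs. A minimal sketch of what that looks like from the caller's side; the subclass name and the flag values below are illustrative, not part of the patch — only the attribute names and the slimmed-down constructor come from the diff.

```python
# Sketch only: exercising the new class-level parser flags from this hunk.
# MyParser and the chosen values are hypothetical; the attribute names and the
# (error_level, error_message_context, max_errors) constructor are from the diff.
from sqlglot.parser import Parser


class MyParser(Parser):
    # Previously passed to Parser.__init__, now plain class attributes.
    ALIAS_POST_TABLESAMPLE = True
    NULL_ORDERING = "nulls_are_large"


parser = MyParser(error_message_context=50, max_errors=5)
print(parser.ALIAS_POST_TABLESAMPLE, parser.NULL_ORDERING)
```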
+ 828 def __init__( + 829 self, + 830 error_level: t.Optional[ErrorLevel] = None, + 831 error_message_context: int = 100, + 832 max_errors: int = 3, + 833 ): + 834 self.error_level = error_level or ErrorLevel.IMMEDIATE + 835 self.error_message_context = error_message_context + 836 self.max_errors = max_errors + 837 self.reset() + 838 + 839 def reset(self): + 840 self.sql = "" + 841 self.errors = [] + 842 self._tokens = [] + 843 self._index = 0 + 844 self._curr = None + 845 self._next = None + 846 self._prev = None + 847 self._prev_comments = None 848 - 849 Returns: - 850 The list of syntax trees. - 851 """ - 852 return self._parse( - 853 parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql - 854 ) + 849 def parse( + 850 self, raw_tokens: t.List[Token], sql: t.Optional[str] = None + 851 ) -> t.List[t.Optional[exp.Expression]]: + 852 """ + 853 Parses a list of tokens and returns a list of syntax trees, one tree + 854 per parsed SQL statement. 855 - 856 def parse_into( - 857 self, - 858 expression_types: exp.IntoType, - 859 raw_tokens: t.List[Token], - 860 sql: t.Optional[str] = None, - 861 ) -> t.List[t.Optional[exp.Expression]]: - 862 """ - 863 Parses a list of tokens into a given Expression type. If a collection of Expression - 864 types is given instead, this method will try to parse the token list into each one - 865 of them, stopping at the first for which the parsing succeeds. + 856 Args: + 857 raw_tokens: The list of tokens. + 858 sql: The original SQL string, used to produce helpful debug messages. + 859 + 860 Returns: + 861 The list of the produced syntax trees. + 862 """ + 863 return self._parse( + 864 parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql + 865 ) 866 - 867 Args: - 868 expression_types: the expression type(s) to try and parse the token list into. - 869 raw_tokens: the list of tokens. - 870 sql: the original SQL string, used to produce helpful debug messages. - 871 - 872 Returns: - 873 The target Expression. - 874 """ - 875 errors = [] - 876 for expression_type in ensure_collection(expression_types): - 877 parser = self.EXPRESSION_PARSERS.get(expression_type) - 878 if not parser: - 879 raise TypeError(f"No parser registered for {expression_type}") - 880 try: - 881 return self._parse(parser, raw_tokens, sql) - 882 except ParseError as e: - 883 e.errors[0]["into_expression"] = expression_type - 884 errors.append(e) - 885 raise ParseError( - 886 f"Failed to parse '{sql or raw_tokens}' into {expression_types}", - 887 errors=merge_errors(errors), - 888 ) from errors[-1] - 889 - 890 def _parse( - 891 self, - 892 parse_method: t.Callable[[Parser], t.Optional[exp.Expression]], - 893 raw_tokens: t.List[Token], - 894 sql: t.Optional[str] = None, - 895 ) -> t.List[t.Optional[exp.Expression]]: - 896 self.reset() - 897 self.sql = sql or "" - 898 total = len(raw_tokens) - 899 chunks: t.List[t.List[Token]] = [[]] - 900 - 901 for i, token in enumerate(raw_tokens): - 902 if token.token_type == TokenType.SEMICOLON: - 903 if i < total - 1: - 904 chunks.append([]) - 905 else: - 906 chunks[-1].append(token) - 907 - 908 expressions = [] - 909 - 910 for tokens in chunks: - 911 self._index = -1 - 912 self._tokens = tokens - 913 self._advance() + 867 def parse_into( + 868 self, + 869 expression_types: exp.IntoType, + 870 raw_tokens: t.List[Token], + 871 sql: t.Optional[str] = None, + 872 ) -> t.List[t.Optional[exp.Expression]]: + 873 """ + 874 Parses a list of tokens into a given Expression type. 
If a collection of Expression + 875 types is given instead, this method will try to parse the token list into each one + 876 of them, stopping at the first for which the parsing succeeds. + 877 + 878 Args: + 879 expression_types: The expression type(s) to try and parse the token list into. + 880 raw_tokens: The list of tokens. + 881 sql: The original SQL string, used to produce helpful debug messages. + 882 + 883 Returns: + 884 The target Expression. + 885 """ + 886 errors = [] + 887 for expression_type in ensure_list(expression_types): + 888 parser = self.EXPRESSION_PARSERS.get(expression_type) + 889 if not parser: + 890 raise TypeError(f"No parser registered for {expression_type}") + 891 + 892 try: + 893 return self._parse(parser, raw_tokens, sql) + 894 except ParseError as e: + 895 e.errors[0]["into_expression"] = expression_type + 896 errors.append(e) + 897 + 898 raise ParseError( + 899 f"Failed to parse '{sql or raw_tokens}' into {expression_types}", + 900 errors=merge_errors(errors), + 901 ) from errors[-1] + 902 + 903 def _parse( + 904 self, + 905 parse_method: t.Callable[[Parser], t.Optional[exp.Expression]], + 906 raw_tokens: t.List[Token], + 907 sql: t.Optional[str] = None, + 908 ) -> t.List[t.Optional[exp.Expression]]: + 909 self.reset() + 910 self.sql = sql or "" + 911 + 912 total = len(raw_tokens) + 913 chunks: t.List[t.List[Token]] = [[]] 914 - 915 expressions.append(parse_method(self)) - 916 - 917 if self._index < len(self._tokens): - 918 self.raise_error("Invalid expression / Unexpected token") - 919 - 920 self.check_errors() + 915 for i, token in enumerate(raw_tokens): + 916 if token.token_type == TokenType.SEMICOLON: + 917 if i < total - 1: + 918 chunks.append([]) + 919 else: + 920 chunks[-1].append(token) 921 - 922 return expressions + 922 expressions = [] 923 - 924 def check_errors(self) -> None: - 925 """ - 926 Logs or raises any found errors, depending on the chosen error level setting. - 927 """ - 928 if self.error_level == ErrorLevel.WARN: - 929 for error in self.errors: - 930 logger.error(str(error)) - 931 elif self.error_level == ErrorLevel.RAISE and self.errors: - 932 raise ParseError( - 933 concat_messages(self.errors, self.max_errors), - 934 errors=merge_errors(self.errors), - 935 ) - 936 - 937 def raise_error(self, message: str, token: t.Optional[Token] = None) -> None: - 938 """ - 939 Appends an error in the list of recorded errors or raises it, depending on the chosen - 940 error level setting. 
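For readers skimming the parse()/_parse() pair in this hunk: raw tokens are split into chunks at top-level semicolons and each chunk is parsed into its own syntax tree. A small usage sketch against the base classes; it assumes only what the hunk shows plus the Tokenizer.tokenize entry point, which lives outside this diff.

```python
# Sketch: one tree per semicolon-separated statement, as _parse() chunks them.
from sqlglot.parser import Parser
from sqlglot.tokens import Tokenizer

sql = "SELECT 1; SELECT 2"
tokens = Tokenizer().tokenize(sql)

# parse() returns a list of expression trees, one per statement.
trees = Parser().parse(tokens, sql=sql)
print(len(trees))  # 2
for tree in trees:
    print(tree.sql())
```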
- 941 """ - 942 token = token or self._curr or self._prev or Token.string("") - 943 start = token.start - 944 end = token.end + 1 - 945 start_context = self.sql[max(start - self.error_message_context, 0) : start] - 946 highlight = self.sql[start:end] - 947 end_context = self.sql[end : end + self.error_message_context] + 924 for tokens in chunks: + 925 self._index = -1 + 926 self._tokens = tokens + 927 self._advance() + 928 + 929 expressions.append(parse_method(self)) + 930 + 931 if self._index < len(self._tokens): + 932 self.raise_error("Invalid expression / Unexpected token") + 933 + 934 self.check_errors() + 935 + 936 return expressions + 937 + 938 def check_errors(self) -> None: + 939 """Logs or raises any found errors, depending on the chosen error level setting.""" + 940 if self.error_level == ErrorLevel.WARN: + 941 for error in self.errors: + 942 logger.error(str(error)) + 943 elif self.error_level == ErrorLevel.RAISE and self.errors: + 944 raise ParseError( + 945 concat_messages(self.errors, self.max_errors), + 946 errors=merge_errors(self.errors), + 947 ) 948 - 949 error = ParseError.new( - 950 f"{message}. Line {token.line}, Col: {token.col}.\n" - 951 f" {start_context}\033[4m{highlight}\033[0m{end_context}", - 952 description=message, - 953 line=token.line, - 954 col=token.col, - 955 start_context=start_context, - 956 highlight=highlight, - 957 end_context=end_context, - 958 ) - 959 - 960 if self.error_level == ErrorLevel.IMMEDIATE: - 961 raise error - 962 - 963 self.errors.append(error) - 964 - 965 def expression( - 966 self, exp_class: t.Type[E], comments: t.Optional[t.List[str]] = None, **kwargs - 967 ) -> E: - 968 """ - 969 Creates a new, validated Expression. - 970 - 971 Args: - 972 exp_class: the expression class to instantiate. - 973 comments: an optional list of comments to attach to the expression. - 974 kwargs: the arguments to set for the expression along with their respective values. - 975 - 976 Returns: - 977 The target expression. - 978 """ - 979 instance = exp_class(**kwargs) - 980 instance.add_comments(comments) if comments else self._add_comments(instance) - 981 self.validate_expression(instance) - 982 return instance - 983 - 984 def _add_comments(self, expression: t.Optional[exp.Expression]) -> None: - 985 if expression and self._prev_comments: - 986 expression.add_comments(self._prev_comments) - 987 self._prev_comments = None - 988 - 989 def validate_expression( - 990 self, expression: exp.Expression, args: t.Optional[t.List] = None - 991 ) -> None: - 992 """ - 993 Validates an already instantiated expression, making sure that all its mandatory arguments - 994 are set. - 995 - 996 Args: - 997 expression: the expression to validate. - 998 args: an optional list of items that was used to instantiate the expression, if it's a Func. 
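The check_errors()/raise_error() pair above is driven entirely by error_level: IMMEDIATE raises on the first recorded error with the highlighted context, RAISE collects errors and raises them together from check_errors(), WARN only logs, and IGNORE skips expression validation. A hedged sketch of the default (IMMEDIATE) path; the broken input is arbitrary.

```python
# Sketch: how error_level changes what raise_error()/check_errors() do.
from sqlglot.errors import ErrorLevel, ParseError
from sqlglot.parser import Parser
from sqlglot.tokens import Tokenizer

bad_sql = "SELECT * FROM"  # deliberately incomplete
tokens = Tokenizer().tokenize(bad_sql)

try:
    # IMMEDIATE (the default above) raises as soon as an error is recorded,
    # with the underlined context assembled in raise_error().
    Parser(error_level=ErrorLevel.IMMEDIATE).parse(tokens, sql=bad_sql)
except ParseError as e:
    print(e)

# ErrorLevel.WARN would only log, ErrorLevel.RAISE would raise once from
# check_errors(), and ErrorLevel.IGNORE skips expression validation.
```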
- 999 """ -1000 if self.error_level == ErrorLevel.IGNORE: -1001 return -1002 -1003 for error_message in expression.error_messages(args): -1004 self.raise_error(error_message) -1005 -1006 def _find_sql(self, start: Token, end: Token) -> str: -1007 return self.sql[start.start : end.end + 1] -1008 -1009 def _advance(self, times: int = 1) -> None: -1010 self._index += times -1011 self._curr = seq_get(self._tokens, self._index) -1012 self._next = seq_get(self._tokens, self._index + 1) -1013 if self._index > 0: -1014 self._prev = self._tokens[self._index - 1] -1015 self._prev_comments = self._prev.comments -1016 else: -1017 self._prev = None -1018 self._prev_comments = None + 949 def raise_error(self, message: str, token: t.Optional[Token] = None) -> None: + 950 """ + 951 Appends an error in the list of recorded errors or raises it, depending on the chosen + 952 error level setting. + 953 """ + 954 token = token or self._curr or self._prev or Token.string("") + 955 start = token.start + 956 end = token.end + 1 + 957 start_context = self.sql[max(start - self.error_message_context, 0) : start] + 958 highlight = self.sql[start:end] + 959 end_context = self.sql[end : end + self.error_message_context] + 960 + 961 error = ParseError.new( + 962 f"{message}. Line {token.line}, Col: {token.col}.\n" + 963 f" {start_context}\033[4m{highlight}\033[0m{end_context}", + 964 description=message, + 965 line=token.line, + 966 col=token.col, + 967 start_context=start_context, + 968 highlight=highlight, + 969 end_context=end_context, + 970 ) + 971 + 972 if self.error_level == ErrorLevel.IMMEDIATE: + 973 raise error + 974 + 975 self.errors.append(error) + 976 + 977 def expression( + 978 self, exp_class: t.Type[E], comments: t.Optional[t.List[str]] = None, **kwargs + 979 ) -> E: + 980 """ + 981 Creates a new, validated Expression. + 982 + 983 Args: + 984 exp_class: The expression class to instantiate. + 985 comments: An optional list of comments to attach to the expression. + 986 kwargs: The arguments to set for the expression along with their respective values. + 987 + 988 Returns: + 989 The target expression. + 990 """ + 991 instance = exp_class(**kwargs) + 992 instance.add_comments(comments) if comments else self._add_comments(instance) + 993 return self.validate_expression(instance) + 994 + 995 def _add_comments(self, expression: t.Optional[exp.Expression]) -> None: + 996 if expression and self._prev_comments: + 997 expression.add_comments(self._prev_comments) + 998 self._prev_comments = None + 999 +1000 def validate_expression(self, expression: E, args: t.Optional[t.List] = None) -> E: +1001 """ +1002 Validates an Expression, making sure that all its mandatory arguments are set. +1003 +1004 Args: +1005 expression: The expression to validate. +1006 args: An optional list of items that was used to instantiate the expression, if it's a Func. +1007 +1008 Returns: +1009 The validated expression. 
+1010 """ +1011 if self.error_level != ErrorLevel.IGNORE: +1012 for error_message in expression.error_messages(args): +1013 self.raise_error(error_message) +1014 +1015 return expression +1016 +1017 def _find_sql(self, start: Token, end: Token) -> str: +1018 return self.sql[start.start : end.end + 1] 1019 -1020 def _retreat(self, index: int) -> None: -1021 if index != self._index: -1022 self._advance(index - self._index) -1023 -1024 def _parse_command(self) -> exp.Command: -1025 return self.expression(exp.Command, this=self._prev.text, expression=self._parse_string()) -1026 -1027 def _parse_comment(self, allow_exists: bool = True) -> exp.Expression: -1028 start = self._prev -1029 exists = self._parse_exists() if allow_exists else None -1030 -1031 self._match(TokenType.ON) -1032 -1033 kind = self._match_set(self.CREATABLES) and self._prev -1034 -1035 if not kind: -1036 return self._parse_as_command(start) -1037 -1038 if kind.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE): -1039 this = self._parse_user_defined_function(kind=kind.token_type) -1040 elif kind.token_type == TokenType.TABLE: -1041 this = self._parse_table(alias_tokens=self.COMMENT_TABLE_ALIAS_TOKENS) -1042 elif kind.token_type == TokenType.COLUMN: -1043 this = self._parse_column() -1044 else: -1045 this = self._parse_id_var() -1046 -1047 self._match(TokenType.IS) +1020 def _advance(self, times: int = 1) -> None: +1021 self._index += times +1022 self._curr = seq_get(self._tokens, self._index) +1023 self._next = seq_get(self._tokens, self._index + 1) +1024 +1025 if self._index > 0: +1026 self._prev = self._tokens[self._index - 1] +1027 self._prev_comments = self._prev.comments +1028 else: +1029 self._prev = None +1030 self._prev_comments = None +1031 +1032 def _retreat(self, index: int) -> None: +1033 if index != self._index: +1034 self._advance(index - self._index) +1035 +1036 def _parse_command(self) -> exp.Command: +1037 return self.expression(exp.Command, this=self._prev.text, expression=self._parse_string()) +1038 +1039 def _parse_comment(self, allow_exists: bool = True) -> exp.Expression: +1040 start = self._prev +1041 exists = self._parse_exists() if allow_exists else None +1042 +1043 self._match(TokenType.ON) +1044 +1045 kind = self._match_set(self.CREATABLES) and self._prev +1046 if not kind: +1047 return self._parse_as_command(start) 1048 -1049 return self.expression( -1050 exp.Comment, this=this, kind=kind.text, expression=self._parse_string(), exists=exists -1051 ) -1052 -1053 # https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#mergetree-table-ttl -1054 def _parse_ttl(self) -> exp.Expression: -1055 def _parse_ttl_action() -> t.Optional[exp.Expression]: -1056 this = self._parse_bitwise() +1049 if kind.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE): +1050 this = self._parse_user_defined_function(kind=kind.token_type) +1051 elif kind.token_type == TokenType.TABLE: +1052 this = self._parse_table(alias_tokens=self.COMMENT_TABLE_ALIAS_TOKENS) +1053 elif kind.token_type == TokenType.COLUMN: +1054 this = self._parse_column() +1055 else: +1056 this = self._parse_id_var() 1057 -1058 if self._match_text_seq("DELETE"): -1059 return self.expression(exp.MergeTreeTTLAction, this=this, delete=True) -1060 if self._match_text_seq("RECOMPRESS"): -1061 return self.expression( -1062 exp.MergeTreeTTLAction, this=this, recompress=self._parse_bitwise() -1063 ) -1064 if self._match_text_seq("TO", "DISK"): -1065 return self.expression( -1066 exp.MergeTreeTTLAction, this=this, 
to_disk=self._parse_string() -1067 ) -1068 if self._match_text_seq("TO", "VOLUME"): -1069 return self.expression( -1070 exp.MergeTreeTTLAction, this=this, to_volume=self._parse_string() -1071 ) -1072 -1073 return this +1058 self._match(TokenType.IS) +1059 +1060 return self.expression( +1061 exp.Comment, this=this, kind=kind.text, expression=self._parse_string(), exists=exists +1062 ) +1063 +1064 def _parse_to_table( +1065 self, +1066 ) -> exp.ToTableProperty: +1067 table = self._parse_table_parts(schema=True) +1068 return self.expression(exp.ToTableProperty, this=table) +1069 +1070 # https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#mergetree-table-ttl +1071 def _parse_ttl(self) -> exp.Expression: +1072 def _parse_ttl_action() -> t.Optional[exp.Expression]: +1073 this = self._parse_bitwise() 1074 -1075 expressions = self._parse_csv(_parse_ttl_action) -1076 where = self._parse_where() -1077 group = self._parse_group() -1078 -1079 aggregates = None -1080 if group and self._match(TokenType.SET): -1081 aggregates = self._parse_csv(self._parse_set_item) -1082 -1083 return self.expression( -1084 exp.MergeTreeTTL, -1085 expressions=expressions, -1086 where=where, -1087 group=group, -1088 aggregates=aggregates, -1089 ) -1090 -1091 def _parse_statement(self) -> t.Optional[exp.Expression]: -1092 if self._curr is None: -1093 return None -1094 -1095 if self._match_set(self.STATEMENT_PARSERS): -1096 return self.STATEMENT_PARSERS[self._prev.token_type](self) -1097 -1098 if self._match_set(Tokenizer.COMMANDS): -1099 return self._parse_command() -1100 -1101 expression = self._parse_expression() -1102 expression = self._parse_set_operations(expression) if expression else self._parse_select() -1103 return self._parse_query_modifiers(expression) -1104 -1105 def _parse_drop(self) -> t.Optional[exp.Drop | exp.Command]: -1106 start = self._prev -1107 temporary = self._match(TokenType.TEMPORARY) -1108 materialized = self._match_text_seq("MATERIALIZED") -1109 kind = self._match_set(self.CREATABLES) and self._prev.text -1110 if not kind: -1111 return self._parse_as_command(start) -1112 -1113 return self.expression( -1114 exp.Drop, -1115 exists=self._parse_exists(), -1116 this=self._parse_table(schema=True), -1117 kind=kind, -1118 temporary=temporary, -1119 materialized=materialized, -1120 cascade=self._match_text_seq("CASCADE"), -1121 constraints=self._match_text_seq("CONSTRAINTS"), -1122 purge=self._match_text_seq("PURGE"), -1123 ) -1124 -1125 def _parse_exists(self, not_: bool = False) -> t.Optional[bool]: -1126 return ( -1127 self._match(TokenType.IF) -1128 and (not not_ or self._match(TokenType.NOT)) -1129 and self._match(TokenType.EXISTS) -1130 ) -1131 -1132 def _parse_create(self) -> t.Optional[exp.Expression]: -1133 start = self._prev -1134 replace = self._prev.text.upper() == "REPLACE" or self._match_pair( -1135 TokenType.OR, TokenType.REPLACE -1136 ) -1137 unique = self._match(TokenType.UNIQUE) -1138 -1139 if self._match_pair(TokenType.TABLE, TokenType.FUNCTION, advance=False): -1140 self._match(TokenType.TABLE) -1141 -1142 properties = None -1143 create_token = self._match_set(self.CREATABLES) and self._prev -1144 -1145 if not create_token: -1146 properties = self._parse_properties() # exp.Properties.Location.POST_CREATE -1147 create_token = self._match_set(self.CREATABLES) and self._prev -1148 -1149 if not properties or not create_token: -1150 return self._parse_as_command(start) -1151 -1152 exists = self._parse_exists(not_=True) -1153 this = None -1154 expression = None 
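Several helpers in this hunk (_parse_exists, the ON UPDATE constraint lambda, _parse_statement) lean on the same cursor idiom: _match/_match_set/_match_text_seq advance on success and leave _prev at the consumed token, while _retreat rewinds by calling _advance with a negative delta. A toy, self-contained sketch of that idiom for readers unfamiliar with it — an illustration only, not the sqlglot implementation.

```python
# Toy cursor illustrating the _match/_advance/_retreat pattern used above.
import typing as t


class Cursor:
    def __init__(self, tokens: t.List[str]) -> None:
        self._tokens = tokens
        self._index = -1
        self._curr: t.Optional[str] = None
        self._prev: t.Optional[str] = None
        self._advance()

    def _advance(self, times: int = 1) -> None:
        self._index += times
        self._curr = self._tokens[self._index] if self._index < len(self._tokens) else None
        self._prev = self._tokens[self._index - 1] if self._index > 0 else None

    def _retreat(self, index: int) -> None:
        # Rewind is just a relative advance, as in the hunk above.
        if index != self._index:
            self._advance(index - self._index)

    def _match(self, token: str) -> bool:
        if self._curr == token:
            self._advance()
            return True
        return False

    def match_if_not_exists(self) -> bool:
        # Chained matches short-circuit; on partial success we backtrack,
        # the way the real parser uses _retreat elsewhere.
        index = self._index
        if self._match("IF") and self._match("NOT") and self._match("EXISTS"):
            return True
        self._retreat(index)
        return False


cur = Cursor(["IF", "NOT", "EXISTS", "t"])
print(cur.match_if_not_exists(), cur._curr)  # True t
```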
-1155 indexes = None -1156 no_schema_binding = None -1157 begin = None -1158 clone = None -1159 -1160 if create_token.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE): -1161 this = self._parse_user_defined_function(kind=create_token.token_type) -1162 temp_properties = self._parse_properties() -1163 if properties and temp_properties: -1164 properties.expressions.extend(temp_properties.expressions) -1165 elif temp_properties: -1166 properties = temp_properties -1167 -1168 self._match(TokenType.ALIAS) -1169 begin = self._match(TokenType.BEGIN) -1170 return_ = self._match_text_seq("RETURN") -1171 expression = self._parse_statement() -1172 -1173 if return_: -1174 expression = self.expression(exp.Return, this=expression) -1175 elif create_token.token_type == TokenType.INDEX: -1176 this = self._parse_index(index=self._parse_id_var()) -1177 elif create_token.token_type in self.DB_CREATABLES: -1178 table_parts = self._parse_table_parts(schema=True) +1075 if self._match_text_seq("DELETE"): +1076 return self.expression(exp.MergeTreeTTLAction, this=this, delete=True) +1077 if self._match_text_seq("RECOMPRESS"): +1078 return self.expression( +1079 exp.MergeTreeTTLAction, this=this, recompress=self._parse_bitwise() +1080 ) +1081 if self._match_text_seq("TO", "DISK"): +1082 return self.expression( +1083 exp.MergeTreeTTLAction, this=this, to_disk=self._parse_string() +1084 ) +1085 if self._match_text_seq("TO", "VOLUME"): +1086 return self.expression( +1087 exp.MergeTreeTTLAction, this=this, to_volume=self._parse_string() +1088 ) +1089 +1090 return this +1091 +1092 expressions = self._parse_csv(_parse_ttl_action) +1093 where = self._parse_where() +1094 group = self._parse_group() +1095 +1096 aggregates = None +1097 if group and self._match(TokenType.SET): +1098 aggregates = self._parse_csv(self._parse_set_item) +1099 +1100 return self.expression( +1101 exp.MergeTreeTTL, +1102 expressions=expressions, +1103 where=where, +1104 group=group, +1105 aggregates=aggregates, +1106 ) +1107 +1108 def _parse_statement(self) -> t.Optional[exp.Expression]: +1109 if self._curr is None: +1110 return None +1111 +1112 if self._match_set(self.STATEMENT_PARSERS): +1113 return self.STATEMENT_PARSERS[self._prev.token_type](self) +1114 +1115 if self._match_set(Tokenizer.COMMANDS): +1116 return self._parse_command() +1117 +1118 expression = self._parse_expression() +1119 expression = self._parse_set_operations(expression) if expression else self._parse_select() +1120 return self._parse_query_modifiers(expression) +1121 +1122 def _parse_drop(self) -> exp.Drop | exp.Command: +1123 start = self._prev +1124 temporary = self._match(TokenType.TEMPORARY) +1125 materialized = self._match_text_seq("MATERIALIZED") +1126 +1127 kind = self._match_set(self.CREATABLES) and self._prev.text +1128 if not kind: +1129 return self._parse_as_command(start) +1130 +1131 return self.expression( +1132 exp.Drop, +1133 exists=self._parse_exists(), +1134 this=self._parse_table(schema=True), +1135 kind=kind, +1136 temporary=temporary, +1137 materialized=materialized, +1138 cascade=self._match_text_seq("CASCADE"), +1139 constraints=self._match_text_seq("CONSTRAINTS"), +1140 purge=self._match_text_seq("PURGE"), +1141 ) +1142 +1143 def _parse_exists(self, not_: bool = False) -> t.Optional[bool]: +1144 return ( +1145 self._match(TokenType.IF) +1146 and (not not_ or self._match(TokenType.NOT)) +1147 and self._match(TokenType.EXISTS) +1148 ) +1149 +1150 def _parse_create(self) -> exp.Create | exp.Command: +1151 # Note: this can't be None because we've 
matched a statement parser +1152 start = self._prev +1153 replace = start.text.upper() == "REPLACE" or self._match_pair( +1154 TokenType.OR, TokenType.REPLACE +1155 ) +1156 unique = self._match(TokenType.UNIQUE) +1157 +1158 if self._match_pair(TokenType.TABLE, TokenType.FUNCTION, advance=False): +1159 self._advance() +1160 +1161 properties = None +1162 create_token = self._match_set(self.CREATABLES) and self._prev +1163 +1164 if not create_token: +1165 # exp.Properties.Location.POST_CREATE +1166 properties = self._parse_properties() +1167 create_token = self._match_set(self.CREATABLES) and self._prev +1168 +1169 if not properties or not create_token: +1170 return self._parse_as_command(start) +1171 +1172 exists = self._parse_exists(not_=True) +1173 this = None +1174 expression = None +1175 indexes = None +1176 no_schema_binding = None +1177 begin = None +1178 clone = None 1179 -1180 # exp.Properties.Location.POST_NAME -1181 if self._match(TokenType.COMMA): -1182 temp_properties = self._parse_properties(before=True) -1183 if properties and temp_properties: -1184 properties.expressions.extend(temp_properties.expressions) -1185 elif temp_properties: -1186 properties = temp_properties -1187 -1188 this = self._parse_schema(this=table_parts) +1180 def extend_props(temp_props: t.Optional[exp.Properties]) -> None: +1181 nonlocal properties +1182 if properties and temp_props: +1183 properties.expressions.extend(temp_props.expressions) +1184 elif temp_props: +1185 properties = temp_props +1186 +1187 if create_token.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE): +1188 this = self._parse_user_defined_function(kind=create_token.token_type) 1189 -1190 # exp.Properties.Location.POST_SCHEMA and POST_WITH -1191 temp_properties = self._parse_properties() -1192 if properties and temp_properties: -1193 properties.expressions.extend(temp_properties.expressions) -1194 elif temp_properties: -1195 properties = temp_properties -1196 -1197 self._match(TokenType.ALIAS) -1198 -1199 # exp.Properties.Location.POST_ALIAS -1200 if not ( -1201 self._match(TokenType.SELECT, advance=False) -1202 or self._match(TokenType.WITH, advance=False) -1203 or self._match(TokenType.L_PAREN, advance=False) -1204 ): -1205 temp_properties = self._parse_properties() -1206 if properties and temp_properties: -1207 properties.expressions.extend(temp_properties.expressions) -1208 elif temp_properties: -1209 properties = temp_properties +1190 # exp.Properties.Location.POST_SCHEMA ("schema" here is the UDF's type signature) +1191 extend_props(self._parse_properties()) +1192 +1193 self._match(TokenType.ALIAS) +1194 begin = self._match(TokenType.BEGIN) +1195 return_ = self._match_text_seq("RETURN") +1196 expression = self._parse_statement() +1197 +1198 if return_: +1199 expression = self.expression(exp.Return, this=expression) +1200 elif create_token.token_type == TokenType.INDEX: +1201 this = self._parse_index(index=self._parse_id_var()) +1202 elif create_token.token_type in self.DB_CREATABLES: +1203 table_parts = self._parse_table_parts(schema=True) +1204 +1205 # exp.Properties.Location.POST_NAME +1206 self._match(TokenType.COMMA) +1207 extend_props(self._parse_properties(before=True)) +1208 +1209 this = self._parse_schema(this=table_parts) 1210 -1211 expression = self._parse_ddl_select() -1212 -1213 if create_token.token_type == TokenType.TABLE: -1214 indexes = [] -1215 while True: -1216 index = self._parse_index() -1217 -1218 # exp.Properties.Location.POST_EXPRESSION or exp.Properties.Location.POST_INDEX -1219 temp_properties = 
self._parse_properties() -1220 if properties and temp_properties: -1221 properties.expressions.extend(temp_properties.expressions) -1222 elif temp_properties: -1223 properties = temp_properties -1224 -1225 if not index: -1226 break -1227 else: -1228 self._match(TokenType.COMMA) -1229 indexes.append(index) -1230 elif create_token.token_type == TokenType.VIEW: -1231 if self._match_text_seq("WITH", "NO", "SCHEMA", "BINDING"): -1232 no_schema_binding = True -1233 -1234 if self._match_text_seq("CLONE"): -1235 clone = self._parse_table(schema=True) -1236 when = self._match_texts({"AT", "BEFORE"}) and self._prev.text.upper() -1237 clone_kind = ( -1238 self._match(TokenType.L_PAREN) -1239 and self._match_texts(self.CLONE_KINDS) -1240 and self._prev.text.upper() -1241 ) -1242 clone_expression = self._match(TokenType.FARROW) and self._parse_bitwise() -1243 self._match(TokenType.R_PAREN) -1244 clone = self.expression( -1245 exp.Clone, this=clone, when=when, kind=clone_kind, expression=clone_expression -1246 ) -1247 -1248 return self.expression( -1249 exp.Create, -1250 this=this, -1251 kind=create_token.text, -1252 replace=replace, -1253 unique=unique, -1254 expression=expression, -1255 exists=exists, -1256 properties=properties, -1257 indexes=indexes, -1258 no_schema_binding=no_schema_binding, -1259 begin=begin, -1260 clone=clone, -1261 ) -1262 -1263 def _parse_property_before(self) -> t.Optional[exp.Expression]: -1264 # only used for teradata currently -1265 self._match(TokenType.COMMA) +1211 # exp.Properties.Location.POST_SCHEMA and POST_WITH +1212 extend_props(self._parse_properties()) +1213 +1214 self._match(TokenType.ALIAS) +1215 if not self._match_set(self.DDL_SELECT_TOKENS, advance=False): +1216 # exp.Properties.Location.POST_ALIAS +1217 extend_props(self._parse_properties()) +1218 +1219 expression = self._parse_ddl_select() +1220 +1221 if create_token.token_type == TokenType.TABLE: +1222 indexes = [] +1223 while True: +1224 index = self._parse_index() +1225 +1226 # exp.Properties.Location.POST_EXPRESSION and POST_INDEX +1227 extend_props(self._parse_properties()) +1228 +1229 if not index: +1230 break +1231 else: +1232 self._match(TokenType.COMMA) +1233 indexes.append(index) +1234 elif create_token.token_type == TokenType.VIEW: +1235 if self._match_text_seq("WITH", "NO", "SCHEMA", "BINDING"): +1236 no_schema_binding = True +1237 +1238 if self._match_text_seq("CLONE"): +1239 clone = self._parse_table(schema=True) +1240 when = self._match_texts({"AT", "BEFORE"}) and self._prev.text.upper() +1241 clone_kind = ( +1242 self._match(TokenType.L_PAREN) +1243 and self._match_texts(self.CLONE_KINDS) +1244 and self._prev.text.upper() +1245 ) +1246 clone_expression = self._match(TokenType.FARROW) and self._parse_bitwise() +1247 self._match(TokenType.R_PAREN) +1248 clone = self.expression( +1249 exp.Clone, this=clone, when=when, kind=clone_kind, expression=clone_expression +1250 ) +1251 +1252 return self.expression( +1253 exp.Create, +1254 this=this, +1255 kind=create_token.text, +1256 replace=replace, +1257 unique=unique, +1258 expression=expression, +1259 exists=exists, +1260 properties=properties, +1261 indexes=indexes, +1262 no_schema_binding=no_schema_binding, +1263 begin=begin, +1264 clone=clone, +1265 ) 1266 -1267 kwargs = { -1268 "no": self._match_text_seq("NO"), -1269 "dual": self._match_text_seq("DUAL"), -1270 "before": self._match_text_seq("BEFORE"), -1271 "default": self._match_text_seq("DEFAULT"), -1272 "local": (self._match_text_seq("LOCAL") and "LOCAL") -1273 or (self._match_text_seq("NOT", 
"LOCAL") and "NOT LOCAL"), -1274 "after": self._match_text_seq("AFTER"), -1275 "minimum": self._match_texts(("MIN", "MINIMUM")), -1276 "maximum": self._match_texts(("MAX", "MAXIMUM")), -1277 } -1278 -1279 if self._match_texts(self.PROPERTY_PARSERS): -1280 parser = self.PROPERTY_PARSERS[self._prev.text.upper()] -1281 try: -1282 return parser(self, **{k: v for k, v in kwargs.items() if v}) -1283 except TypeError: -1284 self.raise_error(f"Cannot parse property '{self._prev.text}'") -1285 -1286 return None -1287 -1288 def _parse_property(self) -> t.Optional[exp.Expression]: -1289 if self._match_texts(self.PROPERTY_PARSERS): -1290 return self.PROPERTY_PARSERS[self._prev.text.upper()](self) +1267 def _parse_property_before(self) -> t.Optional[exp.Expression]: +1268 # only used for teradata currently +1269 self._match(TokenType.COMMA) +1270 +1271 kwargs = { +1272 "no": self._match_text_seq("NO"), +1273 "dual": self._match_text_seq("DUAL"), +1274 "before": self._match_text_seq("BEFORE"), +1275 "default": self._match_text_seq("DEFAULT"), +1276 "local": (self._match_text_seq("LOCAL") and "LOCAL") +1277 or (self._match_text_seq("NOT", "LOCAL") and "NOT LOCAL"), +1278 "after": self._match_text_seq("AFTER"), +1279 "minimum": self._match_texts(("MIN", "MINIMUM")), +1280 "maximum": self._match_texts(("MAX", "MAXIMUM")), +1281 } +1282 +1283 if self._match_texts(self.PROPERTY_PARSERS): +1284 parser = self.PROPERTY_PARSERS[self._prev.text.upper()] +1285 try: +1286 return parser(self, **{k: v for k, v in kwargs.items() if v}) +1287 except TypeError: +1288 self.raise_error(f"Cannot parse property '{self._prev.text}'") +1289 +1290 return None 1291 -1292 if self._match_pair(TokenType.DEFAULT, TokenType.CHARACTER_SET): -1293 return self._parse_character_set(default=True) -1294 -1295 if self._match_text_seq("COMPOUND", "SORTKEY"): -1296 return self._parse_sortkey(compound=True) -1297 -1298 if self._match_text_seq("SQL", "SECURITY"): -1299 return self.expression(exp.SqlSecurityProperty, definer=self._match_text_seq("DEFINER")) -1300 -1301 assignment = self._match_pair( -1302 TokenType.VAR, TokenType.EQ, advance=False -1303 ) or self._match_pair(TokenType.STRING, TokenType.EQ, advance=False) +1292 def _parse_property(self) -> t.Optional[exp.Expression]: +1293 if self._match_texts(self.PROPERTY_PARSERS): +1294 return self.PROPERTY_PARSERS[self._prev.text.upper()](self) +1295 +1296 if self._match_pair(TokenType.DEFAULT, TokenType.CHARACTER_SET): +1297 return self._parse_character_set(default=True) +1298 +1299 if self._match_text_seq("COMPOUND", "SORTKEY"): +1300 return self._parse_sortkey(compound=True) +1301 +1302 if self._match_text_seq("SQL", "SECURITY"): +1303 return self.expression(exp.SqlSecurityProperty, definer=self._match_text_seq("DEFINER")) 1304 -1305 if assignment: -1306 key = self._parse_var_or_string() -1307 self._match(TokenType.EQ) -1308 return self.expression(exp.Property, this=key, value=self._parse_column()) -1309 -1310 return None -1311 -1312 def _parse_stored(self) -> exp.Expression: -1313 self._match(TokenType.ALIAS) -1314 -1315 input_format = self._parse_string() if self._match_text_seq("INPUTFORMAT") else None -1316 output_format = self._parse_string() if self._match_text_seq("OUTPUTFORMAT") else None -1317 -1318 return self.expression( -1319 exp.FileFormatProperty, -1320 this=self.expression( -1321 exp.InputOutputFormat, input_format=input_format, output_format=output_format -1322 ) -1323 if input_format or output_format -1324 else self._parse_var_or_string() or self._parse_number() or 
self._parse_id_var(), -1325 ) -1326 -1327 def _parse_property_assignment(self, exp_class: t.Type[exp.Expression]) -> exp.Expression: -1328 self._match(TokenType.EQ) -1329 self._match(TokenType.ALIAS) -1330 return self.expression(exp_class, this=self._parse_field()) -1331 -1332 def _parse_properties(self, before: t.Optional[bool] = None) -> t.Optional[exp.Expression]: -1333 properties = [] -1334 -1335 while True: -1336 if before: -1337 prop = self._parse_property_before() -1338 else: -1339 prop = self._parse_property() -1340 -1341 if not prop: -1342 break -1343 for p in ensure_list(prop): -1344 properties.append(p) -1345 -1346 if properties: -1347 return self.expression(exp.Properties, expressions=properties) +1305 assignment = self._match_pair( +1306 TokenType.VAR, TokenType.EQ, advance=False +1307 ) or self._match_pair(TokenType.STRING, TokenType.EQ, advance=False) +1308 +1309 if assignment: +1310 key = self._parse_var_or_string() +1311 self._match(TokenType.EQ) +1312 return self.expression(exp.Property, this=key, value=self._parse_column()) +1313 +1314 return None +1315 +1316 def _parse_stored(self) -> exp.FileFormatProperty: +1317 self._match(TokenType.ALIAS) +1318 +1319 input_format = self._parse_string() if self._match_text_seq("INPUTFORMAT") else None +1320 output_format = self._parse_string() if self._match_text_seq("OUTPUTFORMAT") else None +1321 +1322 return self.expression( +1323 exp.FileFormatProperty, +1324 this=self.expression( +1325 exp.InputOutputFormat, input_format=input_format, output_format=output_format +1326 ) +1327 if input_format or output_format +1328 else self._parse_var_or_string() or self._parse_number() or self._parse_id_var(), +1329 ) +1330 +1331 def _parse_property_assignment(self, exp_class: t.Type[E]) -> E: +1332 self._match(TokenType.EQ) +1333 self._match(TokenType.ALIAS) +1334 return self.expression(exp_class, this=self._parse_field()) +1335 +1336 def _parse_properties(self, before: t.Optional[bool] = None) -> t.Optional[exp.Properties]: +1337 properties = [] +1338 while True: +1339 if before: +1340 prop = self._parse_property_before() +1341 else: +1342 prop = self._parse_property() +1343 +1344 if not prop: +1345 break +1346 for p in ensure_list(prop): +1347 properties.append(p) 1348 -1349 return None -1350 -1351 def _parse_fallback(self, no: bool = False) -> exp.Expression: -1352 return self.expression( -1353 exp.FallbackProperty, no=no, protection=self._match_text_seq("PROTECTION") -1354 ) -1355 -1356 def _parse_volatile_property(self) -> exp.Expression: -1357 if self._index >= 2: -1358 pre_volatile_token = self._tokens[self._index - 2] -1359 else: -1360 pre_volatile_token = None -1361 -1362 if pre_volatile_token and pre_volatile_token.token_type in ( -1363 TokenType.CREATE, -1364 TokenType.REPLACE, -1365 TokenType.UNIQUE, -1366 ): -1367 return exp.VolatileProperty() -1368 -1369 return self.expression(exp.StabilityProperty, this=exp.Literal.string("VOLATILE")) -1370 -1371 def _parse_with_property( -1372 self, -1373 ) -> t.Union[t.Optional[exp.Expression], t.List[t.Optional[exp.Expression]]]: -1374 self._match(TokenType.WITH) -1375 if self._match(TokenType.L_PAREN, advance=False): -1376 return self._parse_wrapped_csv(self._parse_property) -1377 -1378 if self._match_text_seq("JOURNAL"): -1379 return self._parse_withjournaltable() -1380 -1381 if self._match_text_seq("DATA"): -1382 return self._parse_withdata(no=False) -1383 elif self._match_text_seq("NO", "DATA"): -1384 return self._parse_withdata(no=True) -1385 -1386 if not self._next: -1387 return None 
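_parse_property_before above (Teradata-only for now) collects a dict of optional modifiers and then calls the matched PROPERTY_PARSERS entry with only the truthy ones, turning a TypeError from an unexpected keyword into a parse error. A small standalone sketch of that filtered-kwargs dispatch; the property names and parser callables here are made up for the example and are not the real PROPERTY_PARSERS.

```python
# Toy illustration of the filtered-kwargs dispatch in _parse_property_before.
import typing as t


def parse_fallback(no: bool = False) -> str:
    return f"FallbackProperty(no={no})"


def parse_journal(no: bool = False, dual: bool = False) -> str:
    return f"JournalProperty(no={no}, dual={dual})"


PROPERTY_PARSERS: t.Dict[str, t.Callable[..., str]] = {
    "FALLBACK": parse_fallback,
    "JOURNAL": parse_journal,
}


def parse_property_before(name: str, **kwargs: bool) -> str:
    parser = PROPERTY_PARSERS[name.upper()]
    try:
        # Only forward modifiers that were actually matched, as the hunk does.
        return parser(**{k: v for k, v in kwargs.items() if v})
    except TypeError:
        # The real parser calls self.raise_error() here instead.
        raise ValueError(f"Cannot parse property '{name}'")


print(parse_property_before("FALLBACK", no=True, dual=False))
print(parse_property_before("JOURNAL", dual=True))
```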
-1388 -1389 return self._parse_withisolatedloading() -1390 -1391 # https://dev.mysql.com/doc/refman/8.0/en/create-view.html -1392 def _parse_definer(self) -> t.Optional[exp.Expression]: -1393 self._match(TokenType.EQ) -1394 -1395 user = self._parse_id_var() -1396 self._match(TokenType.PARAMETER) -1397 host = self._parse_id_var() or (self._match(TokenType.MOD) and self._prev.text) -1398 -1399 if not user or not host: -1400 return None -1401 -1402 return exp.DefinerProperty(this=f"{user}@{host}") -1403 -1404 def _parse_withjournaltable(self) -> exp.Expression: -1405 self._match(TokenType.TABLE) -1406 self._match(TokenType.EQ) -1407 return self.expression(exp.WithJournalTableProperty, this=self._parse_table_parts()) -1408 -1409 def _parse_log(self, no: bool = False) -> exp.Expression: -1410 return self.expression(exp.LogProperty, no=no) -1411 -1412 def _parse_journal(self, **kwargs) -> exp.Expression: -1413 return self.expression(exp.JournalProperty, **kwargs) -1414 -1415 def _parse_checksum(self) -> exp.Expression: -1416 self._match(TokenType.EQ) -1417 -1418 on = None -1419 if self._match(TokenType.ON): -1420 on = True -1421 elif self._match_text_seq("OFF"): -1422 on = False -1423 default = self._match(TokenType.DEFAULT) +1349 if properties: +1350 return self.expression(exp.Properties, expressions=properties) +1351 +1352 return None +1353 +1354 def _parse_fallback(self, no: bool = False) -> exp.FallbackProperty: +1355 return self.expression( +1356 exp.FallbackProperty, no=no, protection=self._match_text_seq("PROTECTION") +1357 ) +1358 +1359 def _parse_volatile_property(self) -> exp.VolatileProperty | exp.StabilityProperty: +1360 if self._index >= 2: +1361 pre_volatile_token = self._tokens[self._index - 2] +1362 else: +1363 pre_volatile_token = None +1364 +1365 if pre_volatile_token and pre_volatile_token.token_type in self.PRE_VOLATILE_TOKENS: +1366 return exp.VolatileProperty() +1367 +1368 return self.expression(exp.StabilityProperty, this=exp.Literal.string("VOLATILE")) +1369 +1370 def _parse_with_property( +1371 self, +1372 ) -> t.Optional[exp.Expression] | t.List[t.Optional[exp.Expression]]: +1373 self._match(TokenType.WITH) +1374 if self._match(TokenType.L_PAREN, advance=False): +1375 return self._parse_wrapped_csv(self._parse_property) +1376 +1377 if self._match_text_seq("JOURNAL"): +1378 return self._parse_withjournaltable() +1379 +1380 if self._match_text_seq("DATA"): +1381 return self._parse_withdata(no=False) +1382 elif self._match_text_seq("NO", "DATA"): +1383 return self._parse_withdata(no=True) +1384 +1385 if not self._next: +1386 return None +1387 +1388 return self._parse_withisolatedloading() +1389 +1390 # https://dev.mysql.com/doc/refman/8.0/en/create-view.html +1391 def _parse_definer(self) -> t.Optional[exp.DefinerProperty]: +1392 self._match(TokenType.EQ) +1393 +1394 user = self._parse_id_var() +1395 self._match(TokenType.PARAMETER) +1396 host = self._parse_id_var() or (self._match(TokenType.MOD) and self._prev.text) +1397 +1398 if not user or not host: +1399 return None +1400 +1401 return exp.DefinerProperty(this=f"{user}@{host}") +1402 +1403 def _parse_withjournaltable(self) -> exp.WithJournalTableProperty: +1404 self._match(TokenType.TABLE) +1405 self._match(TokenType.EQ) +1406 return self.expression(exp.WithJournalTableProperty, this=self._parse_table_parts()) +1407 +1408 def _parse_log(self, no: bool = False) -> exp.LogProperty: +1409 return self.expression(exp.LogProperty, no=no) +1410 +1411 def _parse_journal(self, **kwargs) -> exp.JournalProperty: +1412 return 
self.expression(exp.JournalProperty, **kwargs) +1413 +1414 def _parse_checksum(self) -> exp.ChecksumProperty: +1415 self._match(TokenType.EQ) +1416 +1417 on = None +1418 if self._match(TokenType.ON): +1419 on = True +1420 elif self._match_text_seq("OFF"): +1421 on = False +1422 +1423 return self.expression(exp.ChecksumProperty, on=on, default=self._match(TokenType.DEFAULT)) 1424 -1425 return self.expression( -1426 exp.ChecksumProperty, -1427 on=on, -1428 default=default, -1429 ) -1430 -1431 def _parse_cluster(self) -> t.Optional[exp.Expression]: -1432 if not self._match_text_seq("BY"): -1433 self._retreat(self._index - 1) -1434 return None -1435 return self.expression( -1436 exp.Cluster, -1437 expressions=self._parse_csv(self._parse_ordered), -1438 ) -1439 -1440 def _parse_freespace(self) -> exp.Expression: -1441 self._match(TokenType.EQ) -1442 return self.expression( -1443 exp.FreespaceProperty, this=self._parse_number(), percent=self._match(TokenType.PERCENT) -1444 ) -1445 -1446 def _parse_mergeblockratio(self, no: bool = False, default: bool = False) -> exp.Expression: -1447 if self._match(TokenType.EQ): -1448 return self.expression( -1449 exp.MergeBlockRatioProperty, -1450 this=self._parse_number(), -1451 percent=self._match(TokenType.PERCENT), -1452 ) -1453 return self.expression( -1454 exp.MergeBlockRatioProperty, -1455 no=no, -1456 default=default, -1457 ) +1425 def _parse_cluster(self) -> t.Optional[exp.Cluster]: +1426 if not self._match_text_seq("BY"): +1427 self._retreat(self._index - 1) +1428 return None +1429 +1430 return self.expression(exp.Cluster, expressions=self._parse_csv(self._parse_ordered)) +1431 +1432 def _parse_freespace(self) -> exp.FreespaceProperty: +1433 self._match(TokenType.EQ) +1434 return self.expression( +1435 exp.FreespaceProperty, this=self._parse_number(), percent=self._match(TokenType.PERCENT) +1436 ) +1437 +1438 def _parse_mergeblockratio( +1439 self, no: bool = False, default: bool = False +1440 ) -> exp.MergeBlockRatioProperty: +1441 if self._match(TokenType.EQ): +1442 return self.expression( +1443 exp.MergeBlockRatioProperty, +1444 this=self._parse_number(), +1445 percent=self._match(TokenType.PERCENT), +1446 ) +1447 +1448 return self.expression(exp.MergeBlockRatioProperty, no=no, default=default) +1449 +1450 def _parse_datablocksize( +1451 self, +1452 default: t.Optional[bool] = None, +1453 minimum: t.Optional[bool] = None, +1454 maximum: t.Optional[bool] = None, +1455 ) -> exp.DataBlocksizeProperty: +1456 self._match(TokenType.EQ) +1457 size = self._parse_number() 1458 -1459 def _parse_datablocksize( -1460 self, -1461 default: t.Optional[bool] = None, -1462 minimum: t.Optional[bool] = None, -1463 maximum: t.Optional[bool] = None, -1464 ) -> exp.Expression: -1465 self._match(TokenType.EQ) -1466 size = self._parse_number() -1467 units = None -1468 if self._match_texts(("BYTES", "KBYTES", "KILOBYTES")): -1469 units = self._prev.text -1470 return self.expression( -1471 exp.DataBlocksizeProperty, -1472 size=size, -1473 units=units, -1474 default=default, -1475 minimum=minimum, -1476 maximum=maximum, -1477 ) +1459 units = None +1460 if self._match_texts(("BYTES", "KBYTES", "KILOBYTES")): +1461 units = self._prev.text +1462 +1463 return self.expression( +1464 exp.DataBlocksizeProperty, +1465 size=size, +1466 units=units, +1467 default=default, +1468 minimum=minimum, +1469 maximum=maximum, +1470 ) +1471 +1472 def _parse_blockcompression(self) -> exp.BlockCompressionProperty: +1473 self._match(TokenType.EQ) +1474 always = self._match_text_seq("ALWAYS") 
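The Teradata property parsers in this hunk (_parse_checksum, _parse_datablocksize, _parse_blockcompression and friends) all funnel the matched pieces into typed expression nodes. A hedged sketch of constructing one of those nodes by hand, using only the argument names visible in the diff; the literal values are arbitrary.

```python
# Sketch: building a DataBlocksizeProperty directly, mirroring what
# _parse_datablocksize() assembles from "DATABLOCKSIZE = <number> <units>".
# The argument names (size, units, default, minimum, maximum) come from the hunk.
from sqlglot import exp

prop = exp.DataBlocksizeProperty(
    size=exp.Literal.number(16384),
    units="BYTES",
)
print(repr(prop))
```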
+1475 manual = self._match_text_seq("MANUAL") +1476 never = self._match_text_seq("NEVER") +1477 default = self._match_text_seq("DEFAULT") 1478 -1479 def _parse_blockcompression(self) -> exp.Expression: -1480 self._match(TokenType.EQ) -1481 always = self._match_text_seq("ALWAYS") -1482 manual = self._match_text_seq("MANUAL") -1483 never = self._match_text_seq("NEVER") -1484 default = self._match_text_seq("DEFAULT") -1485 autotemp = None -1486 if self._match_text_seq("AUTOTEMP"): -1487 autotemp = self._parse_schema() -1488 -1489 return self.expression( -1490 exp.BlockCompressionProperty, -1491 always=always, -1492 manual=manual, -1493 never=never, -1494 default=default, -1495 autotemp=autotemp, -1496 ) -1497 -1498 def _parse_withisolatedloading(self) -> exp.Expression: -1499 no = self._match_text_seq("NO") -1500 concurrent = self._match_text_seq("CONCURRENT") -1501 self._match_text_seq("ISOLATED", "LOADING") -1502 for_all = self._match_text_seq("FOR", "ALL") -1503 for_insert = self._match_text_seq("FOR", "INSERT") -1504 for_none = self._match_text_seq("FOR", "NONE") -1505 return self.expression( -1506 exp.IsolatedLoadingProperty, -1507 no=no, -1508 concurrent=concurrent, -1509 for_all=for_all, -1510 for_insert=for_insert, -1511 for_none=for_none, -1512 ) -1513 -1514 def _parse_locking(self) -> exp.Expression: -1515 if self._match(TokenType.TABLE): -1516 kind = "TABLE" -1517 elif self._match(TokenType.VIEW): -1518 kind = "VIEW" -1519 elif self._match(TokenType.ROW): -1520 kind = "ROW" -1521 elif self._match_text_seq("DATABASE"): -1522 kind = "DATABASE" -1523 else: -1524 kind = None -1525 -1526 if kind in ("DATABASE", "TABLE", "VIEW"): -1527 this = self._parse_table_parts() -1528 else: -1529 this = None -1530 -1531 if self._match(TokenType.FOR): -1532 for_or_in = "FOR" -1533 elif self._match(TokenType.IN): -1534 for_or_in = "IN" -1535 else: -1536 for_or_in = None -1537 -1538 if self._match_text_seq("ACCESS"): -1539 lock_type = "ACCESS" -1540 elif self._match_texts(("EXCL", "EXCLUSIVE")): -1541 lock_type = "EXCLUSIVE" -1542 elif self._match_text_seq("SHARE"): -1543 lock_type = "SHARE" -1544 elif self._match_text_seq("READ"): -1545 lock_type = "READ" -1546 elif self._match_text_seq("WRITE"): -1547 lock_type = "WRITE" -1548 elif self._match_text_seq("CHECKSUM"): -1549 lock_type = "CHECKSUM" -1550 else: -1551 lock_type = None -1552 -1553 override = self._match_text_seq("OVERRIDE") -1554 -1555 return self.expression( -1556 exp.LockingProperty, -1557 this=this, -1558 kind=kind, -1559 for_or_in=for_or_in, -1560 lock_type=lock_type, -1561 override=override, -1562 ) -1563 -1564 def _parse_partition_by(self) -> t.List[t.Optional[exp.Expression]]: -1565 if self._match(TokenType.PARTITION_BY): -1566 return self._parse_csv(self._parse_conjunction) -1567 return [] -1568 -1569 def _parse_partitioned_by(self) -> exp.Expression: -1570 self._match(TokenType.EQ) -1571 return self.expression( -1572 exp.PartitionedByProperty, -1573 this=self._parse_schema() or self._parse_bracket(self._parse_field()), -1574 ) -1575 -1576 def _parse_withdata(self, no: bool = False) -> exp.Expression: -1577 if self._match_text_seq("AND", "STATISTICS"): -1578 statistics = True -1579 elif self._match_text_seq("AND", "NO", "STATISTICS"): -1580 statistics = False -1581 else: -1582 statistics = None -1583 -1584 return self.expression(exp.WithDataProperty, no=no, statistics=statistics) -1585 -1586 def _parse_no_property(self) -> t.Optional[exp.Property]: -1587 if self._match_text_seq("PRIMARY", "INDEX"): -1588 return 
exp.NoPrimaryIndexProperty() -1589 return None -1590 -1591 def _parse_on_property(self) -> t.Optional[exp.Property]: -1592 if self._match_text_seq("COMMIT", "PRESERVE", "ROWS"): -1593 return exp.OnCommitProperty() -1594 elif self._match_text_seq("COMMIT", "DELETE", "ROWS"): -1595 return exp.OnCommitProperty(delete=True) -1596 return None +1479 autotemp = None +1480 if self._match_text_seq("AUTOTEMP"): +1481 autotemp = self._parse_schema() +1482 +1483 return self.expression( +1484 exp.BlockCompressionProperty, +1485 always=always, +1486 manual=manual, +1487 never=never, +1488 default=default, +1489 autotemp=autotemp, +1490 ) +1491 +1492 def _parse_withisolatedloading(self) -> exp.IsolatedLoadingProperty: +1493 no = self._match_text_seq("NO") +1494 concurrent = self._match_text_seq("CONCURRENT") +1495 self._match_text_seq("ISOLATED", "LOADING") +1496 for_all = self._match_text_seq("FOR", "ALL") +1497 for_insert = self._match_text_seq("FOR", "INSERT") +1498 for_none = self._match_text_seq("FOR", "NONE") +1499 return self.expression( +1500 exp.IsolatedLoadingProperty, +1501 no=no, +1502 concurrent=concurrent, +1503 for_all=for_all, +1504 for_insert=for_insert, +1505 for_none=for_none, +1506 ) +1507 +1508 def _parse_locking(self) -> exp.LockingProperty: +1509 if self._match(TokenType.TABLE): +1510 kind = "TABLE" +1511 elif self._match(TokenType.VIEW): +1512 kind = "VIEW" +1513 elif self._match(TokenType.ROW): +1514 kind = "ROW" +1515 elif self._match_text_seq("DATABASE"): +1516 kind = "DATABASE" +1517 else: +1518 kind = None +1519 +1520 if kind in ("DATABASE", "TABLE", "VIEW"): +1521 this = self._parse_table_parts() +1522 else: +1523 this = None +1524 +1525 if self._match(TokenType.FOR): +1526 for_or_in = "FOR" +1527 elif self._match(TokenType.IN): +1528 for_or_in = "IN" +1529 else: +1530 for_or_in = None +1531 +1532 if self._match_text_seq("ACCESS"): +1533 lock_type = "ACCESS" +1534 elif self._match_texts(("EXCL", "EXCLUSIVE")): +1535 lock_type = "EXCLUSIVE" +1536 elif self._match_text_seq("SHARE"): +1537 lock_type = "SHARE" +1538 elif self._match_text_seq("READ"): +1539 lock_type = "READ" +1540 elif self._match_text_seq("WRITE"): +1541 lock_type = "WRITE" +1542 elif self._match_text_seq("CHECKSUM"): +1543 lock_type = "CHECKSUM" +1544 else: +1545 lock_type = None +1546 +1547 override = self._match_text_seq("OVERRIDE") +1548 +1549 return self.expression( +1550 exp.LockingProperty, +1551 this=this, +1552 kind=kind, +1553 for_or_in=for_or_in, +1554 lock_type=lock_type, +1555 override=override, +1556 ) +1557 +1558 def _parse_partition_by(self) -> t.List[t.Optional[exp.Expression]]: +1559 if self._match(TokenType.PARTITION_BY): +1560 return self._parse_csv(self._parse_conjunction) +1561 return [] +1562 +1563 def _parse_partitioned_by(self) -> exp.PartitionedByProperty: +1564 self._match(TokenType.EQ) +1565 return self.expression( +1566 exp.PartitionedByProperty, +1567 this=self._parse_schema() or self._parse_bracket(self._parse_field()), +1568 ) +1569 +1570 def _parse_withdata(self, no: bool = False) -> exp.WithDataProperty: +1571 if self._match_text_seq("AND", "STATISTICS"): +1572 statistics = True +1573 elif self._match_text_seq("AND", "NO", "STATISTICS"): +1574 statistics = False +1575 else: +1576 statistics = None +1577 +1578 return self.expression(exp.WithDataProperty, no=no, statistics=statistics) +1579 +1580 def _parse_no_property(self) -> t.Optional[exp.NoPrimaryIndexProperty]: +1581 if self._match_text_seq("PRIMARY", "INDEX"): +1582 return exp.NoPrimaryIndexProperty() +1583 return None 
+1584 +1585 def _parse_on_property(self) -> t.Optional[exp.Expression]: +1586 if self._match_text_seq("COMMIT", "PRESERVE", "ROWS"): +1587 return exp.OnCommitProperty() +1588 elif self._match_text_seq("COMMIT", "DELETE", "ROWS"): +1589 return exp.OnCommitProperty(delete=True) +1590 return None +1591 +1592 def _parse_distkey(self) -> exp.DistKeyProperty: +1593 return self.expression(exp.DistKeyProperty, this=self._parse_wrapped(self._parse_id_var)) +1594 +1595 def _parse_create_like(self) -> t.Optional[exp.LikeProperty]: +1596 table = self._parse_table(schema=True) 1597 -1598 def _parse_distkey(self) -> exp.Expression: -1599 return self.expression(exp.DistKeyProperty, this=self._parse_wrapped(self._parse_id_var)) -1600 -1601 def _parse_create_like(self) -> t.Optional[exp.Expression]: -1602 table = self._parse_table(schema=True) -1603 options = [] -1604 while self._match_texts(("INCLUDING", "EXCLUDING")): -1605 this = self._prev.text.upper() -1606 id_var = self._parse_id_var() -1607 -1608 if not id_var: -1609 return None -1610 -1611 options.append( -1612 self.expression( -1613 exp.Property, -1614 this=this, -1615 value=exp.Var(this=id_var.this.upper()), -1616 ) -1617 ) -1618 return self.expression(exp.LikeProperty, this=table, expressions=options) -1619 -1620 def _parse_sortkey(self, compound: bool = False) -> exp.Expression: -1621 return self.expression( -1622 exp.SortKeyProperty, this=self._parse_wrapped_csv(self._parse_id_var), compound=compound -1623 ) -1624 -1625 def _parse_character_set(self, default: bool = False) -> exp.Expression: -1626 self._match(TokenType.EQ) -1627 return self.expression( -1628 exp.CharacterSetProperty, this=self._parse_var_or_string(), default=default -1629 ) -1630 -1631 def _parse_returns(self) -> exp.Expression: -1632 value: t.Optional[exp.Expression] -1633 is_table = self._match(TokenType.TABLE) -1634 -1635 if is_table: -1636 if self._match(TokenType.LT): -1637 value = self.expression( -1638 exp.Schema, -1639 this="TABLE", -1640 expressions=self._parse_csv(self._parse_struct_types), -1641 ) -1642 if not self._match(TokenType.GT): -1643 self.raise_error("Expecting >") -1644 else: -1645 value = self._parse_schema(exp.Var(this="TABLE")) -1646 else: -1647 value = self._parse_types() -1648 -1649 return self.expression(exp.ReturnsProperty, this=value, is_table=is_table) -1650 -1651 def _parse_describe(self) -> exp.Expression: -1652 kind = self._match_set(self.CREATABLES) and self._prev.text -1653 this = self._parse_table() -1654 -1655 return self.expression(exp.Describe, this=this, kind=kind) -1656 -1657 def _parse_insert(self) -> exp.Expression: -1658 overwrite = self._match(TokenType.OVERWRITE) -1659 local = self._match_text_seq("LOCAL") -1660 alternative = None -1661 -1662 if self._match_text_seq("DIRECTORY"): -1663 this: t.Optional[exp.Expression] = self.expression( -1664 exp.Directory, -1665 this=self._parse_var_or_string(), -1666 local=local, -1667 row_format=self._parse_row_format(match_row=True), -1668 ) -1669 else: -1670 if self._match(TokenType.OR): -1671 alternative = self._match_texts(self.INSERT_ALTERNATIVES) and self._prev.text -1672 -1673 self._match(TokenType.INTO) -1674 self._match(TokenType.TABLE) -1675 this = self._parse_table(schema=True) -1676 -1677 return self.expression( -1678 exp.Insert, -1679 this=this, -1680 exists=self._parse_exists(), -1681 partition=self._parse_partition(), -1682 expression=self._parse_ddl_select(), -1683 conflict=self._parse_on_conflict(), -1684 returning=self._parse_returning(), -1685 overwrite=overwrite, -1686 
alternative=alternative, -1687 ) -1688 -1689 def _parse_on_conflict(self) -> t.Optional[exp.Expression]: -1690 conflict = self._match_text_seq("ON", "CONFLICT") -1691 duplicate = self._match_text_seq("ON", "DUPLICATE", "KEY") -1692 -1693 if not (conflict or duplicate): -1694 return None -1695 -1696 nothing = None -1697 expressions = None -1698 key = None -1699 constraint = None -1700 -1701 if conflict: -1702 if self._match_text_seq("ON", "CONSTRAINT"): -1703 constraint = self._parse_id_var() -1704 else: -1705 key = self._parse_csv(self._parse_value) -1706 -1707 self._match_text_seq("DO") -1708 if self._match_text_seq("NOTHING"): -1709 nothing = True -1710 else: -1711 self._match(TokenType.UPDATE) -1712 expressions = self._match(TokenType.SET) and self._parse_csv(self._parse_equality) +1598 options = [] +1599 while self._match_texts(("INCLUDING", "EXCLUDING")): +1600 this = self._prev.text.upper() +1601 +1602 id_var = self._parse_id_var() +1603 if not id_var: +1604 return None +1605 +1606 options.append( +1607 self.expression(exp.Property, this=this, value=exp.var(id_var.this.upper())) +1608 ) +1609 +1610 return self.expression(exp.LikeProperty, this=table, expressions=options) +1611 +1612 def _parse_sortkey(self, compound: bool = False) -> exp.SortKeyProperty: +1613 return self.expression( +1614 exp.SortKeyProperty, this=self._parse_wrapped_id_vars(), compound=compound +1615 ) +1616 +1617 def _parse_character_set(self, default: bool = False) -> exp.CharacterSetProperty: +1618 self._match(TokenType.EQ) +1619 return self.expression( +1620 exp.CharacterSetProperty, this=self._parse_var_or_string(), default=default +1621 ) +1622 +1623 def _parse_returns(self) -> exp.ReturnsProperty: +1624 value: t.Optional[exp.Expression] +1625 is_table = self._match(TokenType.TABLE) +1626 +1627 if is_table: +1628 if self._match(TokenType.LT): +1629 value = self.expression( +1630 exp.Schema, +1631 this="TABLE", +1632 expressions=self._parse_csv(self._parse_struct_types), +1633 ) +1634 if not self._match(TokenType.GT): +1635 self.raise_error("Expecting >") +1636 else: +1637 value = self._parse_schema(exp.var("TABLE")) +1638 else: +1639 value = self._parse_types() +1640 +1641 return self.expression(exp.ReturnsProperty, this=value, is_table=is_table) +1642 +1643 def _parse_describe(self) -> exp.Describe: +1644 kind = self._match_set(self.CREATABLES) and self._prev.text +1645 this = self._parse_table() +1646 return self.expression(exp.Describe, this=this, kind=kind) +1647 +1648 def _parse_insert(self) -> exp.Insert: +1649 overwrite = self._match(TokenType.OVERWRITE) +1650 local = self._match_text_seq("LOCAL") +1651 alternative = None +1652 +1653 if self._match_text_seq("DIRECTORY"): +1654 this: t.Optional[exp.Expression] = self.expression( +1655 exp.Directory, +1656 this=self._parse_var_or_string(), +1657 local=local, +1658 row_format=self._parse_row_format(match_row=True), +1659 ) +1660 else: +1661 if self._match(TokenType.OR): +1662 alternative = self._match_texts(self.INSERT_ALTERNATIVES) and self._prev.text +1663 +1664 self._match(TokenType.INTO) +1665 self._match(TokenType.TABLE) +1666 this = self._parse_table(schema=True) +1667 +1668 return self.expression( +1669 exp.Insert, +1670 this=this, +1671 exists=self._parse_exists(), +1672 partition=self._parse_partition(), +1673 expression=self._parse_ddl_select(), +1674 conflict=self._parse_on_conflict(), +1675 returning=self._parse_returning(), +1676 overwrite=overwrite, +1677 alternative=alternative, +1678 ) +1679 +1680 def _parse_on_conflict(self) -> 
t.Optional[exp.OnConflict]: +1681 conflict = self._match_text_seq("ON", "CONFLICT") +1682 duplicate = self._match_text_seq("ON", "DUPLICATE", "KEY") +1683 +1684 if not conflict and not duplicate: +1685 return None +1686 +1687 nothing = None +1688 expressions = None +1689 key = None +1690 constraint = None +1691 +1692 if conflict: +1693 if self._match_text_seq("ON", "CONSTRAINT"): +1694 constraint = self._parse_id_var() +1695 else: +1696 key = self._parse_csv(self._parse_value) +1697 +1698 self._match_text_seq("DO") +1699 if self._match_text_seq("NOTHING"): +1700 nothing = True +1701 else: +1702 self._match(TokenType.UPDATE) +1703 expressions = self._match(TokenType.SET) and self._parse_csv(self._parse_equality) +1704 +1705 return self.expression( +1706 exp.OnConflict, +1707 duplicate=duplicate, +1708 expressions=expressions, +1709 nothing=nothing, +1710 key=key, +1711 constraint=constraint, +1712 ) 1713 -1714 return self.expression( -1715 exp.OnConflict, -1716 duplicate=duplicate, -1717 expressions=expressions, -1718 nothing=nothing, -1719 key=key, -1720 constraint=constraint, -1721 ) -1722 -1723 def _parse_returning(self) -> t.Optional[exp.Expression]: -1724 if not self._match(TokenType.RETURNING): -1725 return None -1726 -1727 return self.expression(exp.Returning, expressions=self._parse_csv(self._parse_column)) -1728 -1729 def _parse_row(self) -> t.Optional[exp.Expression]: -1730 if not self._match(TokenType.FORMAT): -1731 return None -1732 return self._parse_row_format() +1714 def _parse_returning(self) -> t.Optional[exp.Returning]: +1715 if not self._match(TokenType.RETURNING): +1716 return None +1717 +1718 return self.expression(exp.Returning, expressions=self._parse_csv(self._parse_column)) +1719 +1720 def _parse_row(self) -> t.Optional[exp.RowFormatSerdeProperty | exp.RowFormatDelimitedProperty]: +1721 if not self._match(TokenType.FORMAT): +1722 return None +1723 return self._parse_row_format() +1724 +1725 def _parse_row_format( +1726 self, match_row: bool = False +1727 ) -> t.Optional[exp.RowFormatSerdeProperty | exp.RowFormatDelimitedProperty]: +1728 if match_row and not self._match_pair(TokenType.ROW, TokenType.FORMAT): +1729 return None +1730 +1731 if self._match_text_seq("SERDE"): +1732 return self.expression(exp.RowFormatSerdeProperty, this=self._parse_string()) 1733 -1734 def _parse_row_format(self, match_row: bool = False) -> t.Optional[exp.Expression]: -1735 if match_row and not self._match_pair(TokenType.ROW, TokenType.FORMAT): -1736 return None +1734 self._match_text_seq("DELIMITED") +1735 +1736 kwargs = {} 1737 -1738 if self._match_text_seq("SERDE"): -1739 return self.expression(exp.RowFormatSerdeProperty, this=self._parse_string()) -1740 -1741 self._match_text_seq("DELIMITED") -1742 -1743 kwargs = {} -1744 -1745 if self._match_text_seq("FIELDS", "TERMINATED", "BY"): -1746 kwargs["fields"] = self._parse_string() -1747 if self._match_text_seq("ESCAPED", "BY"): -1748 kwargs["escaped"] = self._parse_string() -1749 if self._match_text_seq("COLLECTION", "ITEMS", "TERMINATED", "BY"): -1750 kwargs["collection_items"] = self._parse_string() -1751 if self._match_text_seq("MAP", "KEYS", "TERMINATED", "BY"): -1752 kwargs["map_keys"] = self._parse_string() -1753 if self._match_text_seq("LINES", "TERMINATED", "BY"): -1754 kwargs["lines"] = self._parse_string() -1755 if self._match_text_seq("NULL", "DEFINED", "AS"): -1756 kwargs["null"] = self._parse_string() -1757 -1758 return self.expression(exp.RowFormatDelimitedProperty, **kwargs) # type: ignore -1759 -1760 def _parse_load(self) 
-> exp.Expression: -1761 if self._match_text_seq("DATA"): -1762 local = self._match_text_seq("LOCAL") -1763 self._match_text_seq("INPATH") -1764 inpath = self._parse_string() -1765 overwrite = self._match(TokenType.OVERWRITE) -1766 self._match_pair(TokenType.INTO, TokenType.TABLE) -1767 -1768 return self.expression( -1769 exp.LoadData, -1770 this=self._parse_table(schema=True), -1771 local=local, -1772 overwrite=overwrite, -1773 inpath=inpath, -1774 partition=self._parse_partition(), -1775 input_format=self._match_text_seq("INPUTFORMAT") and self._parse_string(), -1776 serde=self._match_text_seq("SERDE") and self._parse_string(), -1777 ) -1778 return self._parse_as_command(self._prev) -1779 -1780 def _parse_delete(self) -> exp.Expression: -1781 self._match(TokenType.FROM) -1782 -1783 return self.expression( -1784 exp.Delete, -1785 this=self._parse_table(), -1786 using=self._parse_csv(lambda: self._match(TokenType.USING) and self._parse_table()), -1787 where=self._parse_where(), -1788 returning=self._parse_returning(), -1789 ) -1790 -1791 def _parse_update(self) -> exp.Expression: -1792 return self.expression( -1793 exp.Update, -1794 **{ # type: ignore -1795 "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS), -1796 "expressions": self._match(TokenType.SET) and self._parse_csv(self._parse_equality), -1797 "from": self._parse_from(modifiers=True), -1798 "where": self._parse_where(), -1799 "returning": self._parse_returning(), -1800 }, -1801 ) -1802 -1803 def _parse_uncache(self) -> exp.Expression: -1804 if not self._match(TokenType.TABLE): -1805 self.raise_error("Expecting TABLE after UNCACHE") -1806 -1807 return self.expression( -1808 exp.Uncache, -1809 exists=self._parse_exists(), -1810 this=self._parse_table(schema=True), -1811 ) -1812 -1813 def _parse_cache(self) -> exp.Expression: -1814 lazy = self._match_text_seq("LAZY") -1815 self._match(TokenType.TABLE) -1816 table = self._parse_table(schema=True) -1817 options = [] -1818 -1819 if self._match_text_seq("OPTIONS"): -1820 self._match_l_paren() -1821 k = self._parse_string() -1822 self._match(TokenType.EQ) -1823 v = self._parse_string() -1824 options = [k, v] -1825 self._match_r_paren() +1738 if self._match_text_seq("FIELDS", "TERMINATED", "BY"): +1739 kwargs["fields"] = self._parse_string() +1740 if self._match_text_seq("ESCAPED", "BY"): +1741 kwargs["escaped"] = self._parse_string() +1742 if self._match_text_seq("COLLECTION", "ITEMS", "TERMINATED", "BY"): +1743 kwargs["collection_items"] = self._parse_string() +1744 if self._match_text_seq("MAP", "KEYS", "TERMINATED", "BY"): +1745 kwargs["map_keys"] = self._parse_string() +1746 if self._match_text_seq("LINES", "TERMINATED", "BY"): +1747 kwargs["lines"] = self._parse_string() +1748 if self._match_text_seq("NULL", "DEFINED", "AS"): +1749 kwargs["null"] = self._parse_string() +1750 +1751 return self.expression(exp.RowFormatDelimitedProperty, **kwargs) # type: ignore +1752 +1753 def _parse_load(self) -> exp.LoadData | exp.Command: +1754 if self._match_text_seq("DATA"): +1755 local = self._match_text_seq("LOCAL") +1756 self._match_text_seq("INPATH") +1757 inpath = self._parse_string() +1758 overwrite = self._match(TokenType.OVERWRITE) +1759 self._match_pair(TokenType.INTO, TokenType.TABLE) +1760 +1761 return self.expression( +1762 exp.LoadData, +1763 this=self._parse_table(schema=True), +1764 local=local, +1765 overwrite=overwrite, +1766 inpath=inpath, +1767 partition=self._parse_partition(), +1768 input_format=self._match_text_seq("INPUTFORMAT") and self._parse_string(), +1769 
serde=self._match_text_seq("SERDE") and self._parse_string(), +1770 ) +1771 return self._parse_as_command(self._prev) +1772 +1773 def _parse_delete(self) -> exp.Delete: +1774 self._match(TokenType.FROM) +1775 +1776 return self.expression( +1777 exp.Delete, +1778 this=self._parse_table(), +1779 using=self._parse_csv(lambda: self._match(TokenType.USING) and self._parse_table()), +1780 where=self._parse_where(), +1781 returning=self._parse_returning(), +1782 ) +1783 +1784 def _parse_update(self) -> exp.Update: +1785 return self.expression( +1786 exp.Update, +1787 **{ # type: ignore +1788 "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS), +1789 "expressions": self._match(TokenType.SET) and self._parse_csv(self._parse_equality), +1790 "from": self._parse_from(modifiers=True), +1791 "where": self._parse_where(), +1792 "returning": self._parse_returning(), +1793 }, +1794 ) +1795 +1796 def _parse_uncache(self) -> exp.Uncache: +1797 if not self._match(TokenType.TABLE): +1798 self.raise_error("Expecting TABLE after UNCACHE") +1799 +1800 return self.expression( +1801 exp.Uncache, exists=self._parse_exists(), this=self._parse_table(schema=True) +1802 ) +1803 +1804 def _parse_cache(self) -> exp.Cache: +1805 lazy = self._match_text_seq("LAZY") +1806 self._match(TokenType.TABLE) +1807 table = self._parse_table(schema=True) +1808 +1809 options = [] +1810 if self._match_text_seq("OPTIONS"): +1811 self._match_l_paren() +1812 k = self._parse_string() +1813 self._match(TokenType.EQ) +1814 v = self._parse_string() +1815 options = [k, v] +1816 self._match_r_paren() +1817 +1818 self._match(TokenType.ALIAS) +1819 return self.expression( +1820 exp.Cache, +1821 this=table, +1822 lazy=lazy, +1823 options=options, +1824 expression=self._parse_select(nested=True), +1825 ) 1826 -1827 self._match(TokenType.ALIAS) -1828 return self.expression( -1829 exp.Cache, -1830 this=table, -1831 lazy=lazy, -1832 options=options, -1833 expression=self._parse_select(nested=True), -1834 ) -1835 -1836 def _parse_partition(self) -> t.Optional[exp.Expression]: -1837 if not self._match(TokenType.PARTITION): -1838 return None -1839 -1840 return self.expression( -1841 exp.Partition, expressions=self._parse_wrapped_csv(self._parse_conjunction) -1842 ) -1843 -1844 def _parse_value(self) -> exp.Expression: -1845 if self._match(TokenType.L_PAREN): -1846 expressions = self._parse_csv(self._parse_conjunction) -1847 self._match_r_paren() -1848 return self.expression(exp.Tuple, expressions=expressions) -1849 -1850 # In presto we can have VALUES 1, 2 which results in 1 column & 2 rows. 
-1851 # Source: https://prestodb.io/docs/current/sql/values.html -1852 return self.expression(exp.Tuple, expressions=[self._parse_conjunction()]) -1853 -1854 def _parse_select( -1855 self, nested: bool = False, table: bool = False, parse_subquery_alias: bool = True -1856 ) -> t.Optional[exp.Expression]: -1857 cte = self._parse_with() -1858 if cte: -1859 this = self._parse_statement() -1860 -1861 if not this: -1862 self.raise_error("Failed to parse any statement following CTE") -1863 return cte -1864 -1865 if "with" in this.arg_types: -1866 this.set("with", cte) -1867 else: -1868 self.raise_error(f"{this.key} does not support CTE") -1869 this = cte -1870 elif self._match(TokenType.SELECT): -1871 comments = self._prev_comments -1872 -1873 hint = self._parse_hint() -1874 all_ = self._match(TokenType.ALL) -1875 distinct = self._match(TokenType.DISTINCT) -1876 -1877 kind = ( -1878 self._match(TokenType.ALIAS) -1879 and self._match_texts(("STRUCT", "VALUE")) -1880 and self._prev.text -1881 ) +1827 def _parse_partition(self) -> t.Optional[exp.Partition]: +1828 if not self._match(TokenType.PARTITION): +1829 return None +1830 +1831 return self.expression( +1832 exp.Partition, expressions=self._parse_wrapped_csv(self._parse_conjunction) +1833 ) +1834 +1835 def _parse_value(self) -> exp.Tuple: +1836 if self._match(TokenType.L_PAREN): +1837 expressions = self._parse_csv(self._parse_conjunction) +1838 self._match_r_paren() +1839 return self.expression(exp.Tuple, expressions=expressions) +1840 +1841 # In presto we can have VALUES 1, 2 which results in 1 column & 2 rows. +1842 # Source: https://prestodb.io/docs/current/sql/values.html +1843 return self.expression(exp.Tuple, expressions=[self._parse_conjunction()]) +1844 +1845 def _parse_select( +1846 self, nested: bool = False, table: bool = False, parse_subquery_alias: bool = True +1847 ) -> t.Optional[exp.Expression]: +1848 cte = self._parse_with() +1849 if cte: +1850 this = self._parse_statement() +1851 +1852 if not this: +1853 self.raise_error("Failed to parse any statement following CTE") +1854 return cte +1855 +1856 if "with" in this.arg_types: +1857 this.set("with", cte) +1858 else: +1859 self.raise_error(f"{this.key} does not support CTE") +1860 this = cte +1861 elif self._match(TokenType.SELECT): +1862 comments = self._prev_comments +1863 +1864 hint = self._parse_hint() +1865 all_ = self._match(TokenType.ALL) +1866 distinct = self._match(TokenType.DISTINCT) +1867 +1868 kind = ( +1869 self._match(TokenType.ALIAS) +1870 and self._match_texts(("STRUCT", "VALUE")) +1871 and self._prev.text +1872 ) +1873 +1874 if distinct: +1875 distinct = self.expression( +1876 exp.Distinct, +1877 on=self._parse_value() if self._match(TokenType.ON) else None, +1878 ) +1879 +1880 if all_ and distinct: +1881 self.raise_error("Cannot specify both ALL and DISTINCT after SELECT") 1882 -1883 if distinct: -1884 distinct = self.expression( -1885 exp.Distinct, -1886 on=self._parse_value() if self._match(TokenType.ON) else None, -1887 ) -1888 -1889 if all_ and distinct: -1890 self.raise_error("Cannot specify both ALL and DISTINCT after SELECT") -1891 -1892 limit = self._parse_limit(top=True) -1893 expressions = self._parse_csv(self._parse_expression) -1894 -1895 this = self.expression( -1896 exp.Select, -1897 kind=kind, -1898 hint=hint, -1899 distinct=distinct, -1900 expressions=expressions, -1901 limit=limit, -1902 ) -1903 this.comments = comments -1904 -1905 into = self._parse_into() -1906 if into: -1907 this.set("into", into) -1908 -1909 from_ = self._parse_from() -1910 if 
from_: -1911 this.set("from", from_) -1912 -1913 this = self._parse_query_modifiers(this) -1914 elif (table or nested) and self._match(TokenType.L_PAREN): -1915 if self._match(TokenType.PIVOT): -1916 this = self._parse_simplified_pivot() -1917 elif self._match(TokenType.FROM): -1918 this = exp.select("*").from_( -1919 t.cast(exp.From, self._parse_from(skip_from_token=True)) -1920 ) -1921 else: -1922 this = self._parse_table() if table else self._parse_select(nested=True) -1923 this = self._parse_set_operations(self._parse_query_modifiers(this)) -1924 -1925 self._match_r_paren() -1926 -1927 # early return so that subquery unions aren't parsed again -1928 # SELECT * FROM (SELECT 1) UNION ALL SELECT 1 -1929 # Union ALL should be a property of the top select node, not the subquery -1930 return self._parse_subquery(this, parse_alias=parse_subquery_alias) -1931 elif self._match(TokenType.VALUES): -1932 this = self.expression( -1933 exp.Values, -1934 expressions=self._parse_csv(self._parse_value), -1935 alias=self._parse_table_alias(), -1936 ) -1937 else: -1938 this = None +1883 limit = self._parse_limit(top=True) +1884 expressions = self._parse_csv(self._parse_expression) +1885 +1886 this = self.expression( +1887 exp.Select, +1888 kind=kind, +1889 hint=hint, +1890 distinct=distinct, +1891 expressions=expressions, +1892 limit=limit, +1893 ) +1894 this.comments = comments +1895 +1896 into = self._parse_into() +1897 if into: +1898 this.set("into", into) +1899 +1900 from_ = self._parse_from() +1901 if from_: +1902 this.set("from", from_) +1903 +1904 this = self._parse_query_modifiers(this) +1905 elif (table or nested) and self._match(TokenType.L_PAREN): +1906 if self._match(TokenType.PIVOT): +1907 this = self._parse_simplified_pivot() +1908 elif self._match(TokenType.FROM): +1909 this = exp.select("*").from_( +1910 t.cast(exp.From, self._parse_from(skip_from_token=True)) +1911 ) +1912 else: +1913 this = self._parse_table() if table else self._parse_select(nested=True) +1914 this = self._parse_set_operations(self._parse_query_modifiers(this)) +1915 +1916 self._match_r_paren() +1917 +1918 # early return so that subquery unions aren't parsed again +1919 # SELECT * FROM (SELECT 1) UNION ALL SELECT 1 +1920 # Union ALL should be a property of the top select node, not the subquery +1921 return self._parse_subquery(this, parse_alias=parse_subquery_alias) +1922 elif self._match(TokenType.VALUES): +1923 this = self.expression( +1924 exp.Values, +1925 expressions=self._parse_csv(self._parse_value), +1926 alias=self._parse_table_alias(), +1927 ) +1928 else: +1929 this = None +1930 +1931 return self._parse_set_operations(this) +1932 +1933 def _parse_with(self, skip_with_token: bool = False) -> t.Optional[exp.With]: +1934 if not skip_with_token and not self._match(TokenType.WITH): +1935 return None +1936 +1937 comments = self._prev_comments +1938 recursive = self._match(TokenType.RECURSIVE) 1939 -1940 return self._parse_set_operations(this) -1941 -1942 def _parse_with(self, skip_with_token: bool = False) -> t.Optional[exp.Expression]: -1943 if not skip_with_token and not self._match(TokenType.WITH): -1944 return None -1945 -1946 comments = self._prev_comments -1947 recursive = self._match(TokenType.RECURSIVE) +1940 expressions = [] +1941 while True: +1942 expressions.append(self._parse_cte()) +1943 +1944 if not self._match(TokenType.COMMA) and not self._match(TokenType.WITH): +1945 break +1946 else: +1947 self._match(TokenType.WITH) 1948 -1949 expressions = [] -1950 while True: -1951 
expressions.append(self._parse_cte()) +1949 return self.expression( +1950 exp.With, comments=comments, expressions=expressions, recursive=recursive +1951 ) 1952 -1953 if not self._match(TokenType.COMMA) and not self._match(TokenType.WITH): -1954 break -1955 else: -1956 self._match(TokenType.WITH) +1953 def _parse_cte(self) -> exp.CTE: +1954 alias = self._parse_table_alias() +1955 if not alias or not alias.this: +1956 self.raise_error("Expected CTE to have alias") 1957 -1958 return self.expression( -1959 exp.With, comments=comments, expressions=expressions, recursive=recursive -1960 ) -1961 -1962 def _parse_cte(self) -> exp.Expression: -1963 alias = self._parse_table_alias() -1964 if not alias or not alias.this: -1965 self.raise_error("Expected CTE to have alias") -1966 -1967 self._match(TokenType.ALIAS) -1968 -1969 return self.expression( -1970 exp.CTE, -1971 this=self._parse_wrapped(self._parse_statement), -1972 alias=alias, -1973 ) -1974 -1975 def _parse_table_alias( -1976 self, alias_tokens: t.Optional[t.Collection[TokenType]] = None -1977 ) -> t.Optional[exp.Expression]: -1978 any_token = self._match(TokenType.ALIAS) -1979 alias = ( -1980 self._parse_id_var(any_token=any_token, tokens=alias_tokens or self.TABLE_ALIAS_TOKENS) -1981 or self._parse_string_as_identifier() -1982 ) +1958 self._match(TokenType.ALIAS) +1959 return self.expression( +1960 exp.CTE, this=self._parse_wrapped(self._parse_statement), alias=alias +1961 ) +1962 +1963 def _parse_table_alias( +1964 self, alias_tokens: t.Optional[t.Collection[TokenType]] = None +1965 ) -> t.Optional[exp.TableAlias]: +1966 any_token = self._match(TokenType.ALIAS) +1967 alias = ( +1968 self._parse_id_var(any_token=any_token, tokens=alias_tokens or self.TABLE_ALIAS_TOKENS) +1969 or self._parse_string_as_identifier() +1970 ) +1971 +1972 index = self._index +1973 if self._match(TokenType.L_PAREN): +1974 columns = self._parse_csv(self._parse_function_parameter) +1975 self._match_r_paren() if columns else self._retreat(index) +1976 else: +1977 columns = None +1978 +1979 if not alias and not columns: +1980 return None +1981 +1982 return self.expression(exp.TableAlias, this=alias, columns=columns) 1983 -1984 index = self._index -1985 if self._match(TokenType.L_PAREN): -1986 columns = self._parse_csv(self._parse_function_parameter) -1987 self._match_r_paren() if columns else self._retreat(index) -1988 else: -1989 columns = None -1990 -1991 if not alias and not columns: -1992 return None -1993 -1994 return self.expression(exp.TableAlias, this=alias, columns=columns) -1995 -1996 def _parse_subquery( -1997 self, this: t.Optional[exp.Expression], parse_alias: bool = True -1998 ) -> t.Optional[exp.Expression]: -1999 if not this: -2000 return None -2001 return self.expression( -2002 exp.Subquery, -2003 this=this, -2004 pivots=self._parse_pivots(), -2005 alias=self._parse_table_alias() if parse_alias else None, -2006 ) -2007 -2008 def _parse_query_modifiers( -2009 self, this: t.Optional[exp.Expression] -2010 ) -> t.Optional[exp.Expression]: -2011 if isinstance(this, self.MODIFIABLES): -2012 for key, parser in self.QUERY_MODIFIER_PARSERS.items(): -2013 expression = parser(self) -2014 -2015 if expression: -2016 this.set(key, expression) -2017 return this +1984 def _parse_subquery( +1985 self, this: t.Optional[exp.Expression], parse_alias: bool = True +1986 ) -> t.Optional[exp.Subquery]: +1987 if not this: +1988 return None +1989 +1990 return self.expression( +1991 exp.Subquery, +1992 this=this, +1993 pivots=self._parse_pivots(), +1994 
alias=self._parse_table_alias() if parse_alias else None, +1995 ) +1996 +1997 def _parse_query_modifiers( +1998 self, this: t.Optional[exp.Expression] +1999 ) -> t.Optional[exp.Expression]: +2000 if isinstance(this, self.MODIFIABLES): +2001 for key, parser in self.QUERY_MODIFIER_PARSERS.items(): +2002 expression = parser(self) +2003 +2004 if expression: +2005 if key == "limit": +2006 offset = expression.args.pop("offset", None) +2007 if offset: +2008 this.set("offset", exp.Offset(expression=offset)) +2009 this.set(key, expression) +2010 return this +2011 +2012 def _parse_hint(self) -> t.Optional[exp.Hint]: +2013 if self._match(TokenType.HINT): +2014 hints = self._parse_csv(self._parse_function) +2015 +2016 if not self._match_pair(TokenType.STAR, TokenType.SLASH): +2017 self.raise_error("Expected */ after HINT") 2018 -2019 def _parse_hint(self) -> t.Optional[exp.Expression]: -2020 if self._match(TokenType.HINT): -2021 hints = self._parse_csv(self._parse_function) -2022 if not self._match_pair(TokenType.STAR, TokenType.SLASH): -2023 self.raise_error("Expected */ after HINT") -2024 return self.expression(exp.Hint, expressions=hints) -2025 -2026 return None -2027 -2028 def _parse_into(self) -> t.Optional[exp.Expression]: -2029 if not self._match(TokenType.INTO): -2030 return None -2031 -2032 temp = self._match(TokenType.TEMPORARY) -2033 unlogged = self._match_text_seq("UNLOGGED") -2034 self._match(TokenType.TABLE) -2035 -2036 return self.expression( -2037 exp.Into, this=self._parse_table(schema=True), temporary=temp, unlogged=unlogged -2038 ) -2039 -2040 def _parse_from( -2041 self, modifiers: bool = False, skip_from_token: bool = False -2042 ) -> t.Optional[exp.From]: -2043 if not skip_from_token and not self._match(TokenType.FROM): -2044 return None -2045 -2046 comments = self._prev_comments -2047 this = self._parse_table() -2048 -2049 return self.expression( -2050 exp.From, -2051 comments=comments, -2052 this=self._parse_query_modifiers(this) if modifiers else this, -2053 ) -2054 -2055 def _parse_match_recognize(self) -> t.Optional[exp.Expression]: -2056 if not self._match(TokenType.MATCH_RECOGNIZE): -2057 return None -2058 -2059 self._match_l_paren() -2060 -2061 partition = self._parse_partition_by() -2062 order = self._parse_order() -2063 measures = ( -2064 self._parse_csv(self._parse_expression) if self._match_text_seq("MEASURES") else None -2065 ) -2066 -2067 if self._match_text_seq("ONE", "ROW", "PER", "MATCH"): -2068 rows = exp.Var(this="ONE ROW PER MATCH") -2069 elif self._match_text_seq("ALL", "ROWS", "PER", "MATCH"): -2070 text = "ALL ROWS PER MATCH" -2071 if self._match_text_seq("SHOW", "EMPTY", "MATCHES"): -2072 text += f" SHOW EMPTY MATCHES" -2073 elif self._match_text_seq("OMIT", "EMPTY", "MATCHES"): -2074 text += f" OMIT EMPTY MATCHES" -2075 elif self._match_text_seq("WITH", "UNMATCHED", "ROWS"): -2076 text += f" WITH UNMATCHED ROWS" -2077 rows = exp.Var(this=text) -2078 else: -2079 rows = None -2080 -2081 if self._match_text_seq("AFTER", "MATCH", "SKIP"): -2082 text = "AFTER MATCH SKIP" -2083 if self._match_text_seq("PAST", "LAST", "ROW"): -2084 text += f" PAST LAST ROW" -2085 elif self._match_text_seq("TO", "NEXT", "ROW"): -2086 text += f" TO NEXT ROW" -2087 elif self._match_text_seq("TO", "FIRST"): -2088 text += f" TO FIRST {self._advance_any().text}" # type: ignore -2089 elif self._match_text_seq("TO", "LAST"): -2090 text += f" TO LAST {self._advance_any().text}" # type: ignore -2091 after = exp.Var(this=text) -2092 else: -2093 after = None -2094 -2095 if 
self._match_text_seq("PATTERN"): -2096 self._match_l_paren() -2097 -2098 if not self._curr: -2099 self.raise_error("Expecting )", self._curr) -2100 -2101 paren = 1 -2102 start = self._curr -2103 -2104 while self._curr and paren > 0: -2105 if self._curr.token_type == TokenType.L_PAREN: -2106 paren += 1 -2107 if self._curr.token_type == TokenType.R_PAREN: -2108 paren -= 1 -2109 end = self._prev -2110 self._advance() -2111 if paren > 0: -2112 self.raise_error("Expecting )", self._curr) -2113 pattern = exp.Var(this=self._find_sql(start, end)) -2114 else: -2115 pattern = None -2116 -2117 define = ( -2118 self._parse_csv( -2119 lambda: self.expression( -2120 exp.Alias, -2121 alias=self._parse_id_var(any_token=True), -2122 this=self._match(TokenType.ALIAS) and self._parse_conjunction(), -2123 ) -2124 ) -2125 if self._match_text_seq("DEFINE") -2126 else None -2127 ) +2019 return self.expression(exp.Hint, expressions=hints) +2020 +2021 return None +2022 +2023 def _parse_into(self) -> t.Optional[exp.Into]: +2024 if not self._match(TokenType.INTO): +2025 return None +2026 +2027 temp = self._match(TokenType.TEMPORARY) +2028 unlogged = self._match_text_seq("UNLOGGED") +2029 self._match(TokenType.TABLE) +2030 +2031 return self.expression( +2032 exp.Into, this=self._parse_table(schema=True), temporary=temp, unlogged=unlogged +2033 ) +2034 +2035 def _parse_from( +2036 self, modifiers: bool = False, skip_from_token: bool = False +2037 ) -> t.Optional[exp.From]: +2038 if not skip_from_token and not self._match(TokenType.FROM): +2039 return None +2040 +2041 comments = self._prev_comments +2042 this = self._parse_table() +2043 +2044 return self.expression( +2045 exp.From, +2046 comments=comments, +2047 this=self._parse_query_modifiers(this) if modifiers else this, +2048 ) +2049 +2050 def _parse_match_recognize(self) -> t.Optional[exp.MatchRecognize]: +2051 if not self._match(TokenType.MATCH_RECOGNIZE): +2052 return None +2053 +2054 self._match_l_paren() +2055 +2056 partition = self._parse_partition_by() +2057 order = self._parse_order() +2058 measures = ( +2059 self._parse_csv(self._parse_expression) if self._match_text_seq("MEASURES") else None +2060 ) +2061 +2062 if self._match_text_seq("ONE", "ROW", "PER", "MATCH"): +2063 rows = exp.var("ONE ROW PER MATCH") +2064 elif self._match_text_seq("ALL", "ROWS", "PER", "MATCH"): +2065 text = "ALL ROWS PER MATCH" +2066 if self._match_text_seq("SHOW", "EMPTY", "MATCHES"): +2067 text += f" SHOW EMPTY MATCHES" +2068 elif self._match_text_seq("OMIT", "EMPTY", "MATCHES"): +2069 text += f" OMIT EMPTY MATCHES" +2070 elif self._match_text_seq("WITH", "UNMATCHED", "ROWS"): +2071 text += f" WITH UNMATCHED ROWS" +2072 rows = exp.var(text) +2073 else: +2074 rows = None +2075 +2076 if self._match_text_seq("AFTER", "MATCH", "SKIP"): +2077 text = "AFTER MATCH SKIP" +2078 if self._match_text_seq("PAST", "LAST", "ROW"): +2079 text += f" PAST LAST ROW" +2080 elif self._match_text_seq("TO", "NEXT", "ROW"): +2081 text += f" TO NEXT ROW" +2082 elif self._match_text_seq("TO", "FIRST"): +2083 text += f" TO FIRST {self._advance_any().text}" # type: ignore +2084 elif self._match_text_seq("TO", "LAST"): +2085 text += f" TO LAST {self._advance_any().text}" # type: ignore +2086 after = exp.var(text) +2087 else: +2088 after = None +2089 +2090 if self._match_text_seq("PATTERN"): +2091 self._match_l_paren() +2092 +2093 if not self._curr: +2094 self.raise_error("Expecting )", self._curr) +2095 +2096 paren = 1 +2097 start = self._curr +2098 +2099 while self._curr and paren > 0: +2100 if 
self._curr.token_type == TokenType.L_PAREN: +2101 paren += 1 +2102 if self._curr.token_type == TokenType.R_PAREN: +2103 paren -= 1 +2104 +2105 end = self._prev +2106 self._advance() +2107 +2108 if paren > 0: +2109 self.raise_error("Expecting )", self._curr) +2110 +2111 pattern = exp.var(self._find_sql(start, end)) +2112 else: +2113 pattern = None +2114 +2115 define = ( +2116 self._parse_csv( +2117 lambda: self.expression( +2118 exp.Alias, +2119 alias=self._parse_id_var(any_token=True), +2120 this=self._match(TokenType.ALIAS) and self._parse_conjunction(), +2121 ) +2122 ) +2123 if self._match_text_seq("DEFINE") +2124 else None +2125 ) +2126 +2127 self._match_r_paren() 2128 -2129 self._match_r_paren() -2130 -2131 return self.expression( -2132 exp.MatchRecognize, -2133 partition_by=partition, -2134 order=order, -2135 measures=measures, -2136 rows=rows, -2137 after=after, -2138 pattern=pattern, -2139 define=define, -2140 alias=self._parse_table_alias(), -2141 ) -2142 -2143 def _parse_lateral(self) -> t.Optional[exp.Expression]: -2144 outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY) -2145 cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY) -2146 -2147 if outer_apply or cross_apply: -2148 this = self._parse_select(table=True) -2149 view = None -2150 outer = not cross_apply -2151 elif self._match(TokenType.LATERAL): -2152 this = self._parse_select(table=True) -2153 view = self._match(TokenType.VIEW) -2154 outer = self._match(TokenType.OUTER) -2155 else: -2156 return None -2157 -2158 if not this: -2159 this = self._parse_function() or self._parse_id_var(any_token=False) -2160 while self._match(TokenType.DOT): -2161 this = exp.Dot( -2162 this=this, -2163 expression=self._parse_function() or self._parse_id_var(any_token=False), -2164 ) -2165 -2166 table_alias: t.Optional[exp.Expression] -2167 -2168 if view: -2169 table = self._parse_id_var(any_token=False) -2170 columns = self._parse_csv(self._parse_id_var) if self._match(TokenType.ALIAS) else [] -2171 table_alias = self.expression(exp.TableAlias, this=table, columns=columns) -2172 else: -2173 table_alias = self._parse_table_alias() -2174 -2175 expression = self.expression( -2176 exp.Lateral, -2177 this=this, -2178 view=view, -2179 outer=outer, -2180 alias=table_alias, -2181 ) -2182 -2183 return expression -2184 -2185 def _parse_join_parts( -2186 self, -2187 ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]: -2188 return ( -2189 self._match_set(self.JOIN_METHODS) and self._prev, -2190 self._match_set(self.JOIN_SIDES) and self._prev, -2191 self._match_set(self.JOIN_KINDS) and self._prev, -2192 ) -2193 -2194 def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Expression]: -2195 if self._match(TokenType.COMMA): -2196 return self.expression(exp.Join, this=self._parse_table()) -2197 -2198 index = self._index -2199 method, side, kind = self._parse_join_parts() -2200 hint = self._prev.text if self._match_texts(self.JOIN_HINTS) else None -2201 join = self._match(TokenType.JOIN) -2202 -2203 if not skip_join_token and not join: -2204 self._retreat(index) -2205 kind = None -2206 method = None -2207 side = None -2208 -2209 outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY, False) -2210 cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY, False) -2211 -2212 if not skip_join_token and not join and not outer_apply and not cross_apply: -2213 return None -2214 -2215 if outer_apply: -2216 side = Token(TokenType.LEFT, "LEFT") -2217 -2218 kwargs: t.Dict[str, t.Any] = {"this": 
self._parse_table()} -2219 -2220 if method: -2221 kwargs["method"] = method.text -2222 if side: -2223 kwargs["side"] = side.text -2224 if kind: -2225 kwargs["kind"] = kind.text -2226 if hint: -2227 kwargs["hint"] = hint +2129 return self.expression( +2130 exp.MatchRecognize, +2131 partition_by=partition, +2132 order=order, +2133 measures=measures, +2134 rows=rows, +2135 after=after, +2136 pattern=pattern, +2137 define=define, +2138 alias=self._parse_table_alias(), +2139 ) +2140 +2141 def _parse_lateral(self) -> t.Optional[exp.Lateral]: +2142 outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY) +2143 cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY) +2144 +2145 if outer_apply or cross_apply: +2146 this = self._parse_select(table=True) +2147 view = None +2148 outer = not cross_apply +2149 elif self._match(TokenType.LATERAL): +2150 this = self._parse_select(table=True) +2151 view = self._match(TokenType.VIEW) +2152 outer = self._match(TokenType.OUTER) +2153 else: +2154 return None +2155 +2156 if not this: +2157 this = self._parse_function() or self._parse_id_var(any_token=False) +2158 while self._match(TokenType.DOT): +2159 this = exp.Dot( +2160 this=this, +2161 expression=self._parse_function() or self._parse_id_var(any_token=False), +2162 ) +2163 +2164 if view: +2165 table = self._parse_id_var(any_token=False) +2166 columns = self._parse_csv(self._parse_id_var) if self._match(TokenType.ALIAS) else [] +2167 table_alias: t.Optional[exp.TableAlias] = self.expression( +2168 exp.TableAlias, this=table, columns=columns +2169 ) +2170 elif isinstance(this, exp.Subquery) and this.alias: +2171 # Ensures parity between the Subquery's and the Lateral's "alias" args +2172 table_alias = this.args["alias"].copy() +2173 else: +2174 table_alias = self._parse_table_alias() +2175 +2176 return self.expression(exp.Lateral, this=this, view=view, outer=outer, alias=table_alias) +2177 +2178 def _parse_join_parts( +2179 self, +2180 ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]: +2181 return ( +2182 self._match_set(self.JOIN_METHODS) and self._prev, +2183 self._match_set(self.JOIN_SIDES) and self._prev, +2184 self._match_set(self.JOIN_KINDS) and self._prev, +2185 ) +2186 +2187 def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Join]: +2188 if self._match(TokenType.COMMA): +2189 return self.expression(exp.Join, this=self._parse_table()) +2190 +2191 index = self._index +2192 method, side, kind = self._parse_join_parts() +2193 hint = self._prev.text if self._match_texts(self.JOIN_HINTS) else None +2194 join = self._match(TokenType.JOIN) +2195 +2196 if not skip_join_token and not join: +2197 self._retreat(index) +2198 kind = None +2199 method = None +2200 side = None +2201 +2202 outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY, False) +2203 cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY, False) +2204 +2205 if not skip_join_token and not join and not outer_apply and not cross_apply: +2206 return None +2207 +2208 if outer_apply: +2209 side = Token(TokenType.LEFT, "LEFT") +2210 +2211 kwargs: t.Dict[str, t.Any] = {"this": self._parse_table()} +2212 +2213 if method: +2214 kwargs["method"] = method.text +2215 if side: +2216 kwargs["side"] = side.text +2217 if kind: +2218 kwargs["kind"] = kind.text +2219 if hint: +2220 kwargs["hint"] = hint +2221 +2222 if self._match(TokenType.ON): +2223 kwargs["on"] = self._parse_conjunction() +2224 elif self._match(TokenType.USING): +2225 kwargs["using"] = self._parse_wrapped_id_vars() 
+2226 +2227 return self.expression(exp.Join, **kwargs) 2228 -2229 if self._match(TokenType.ON): -2230 kwargs["on"] = self._parse_conjunction() -2231 elif self._match(TokenType.USING): -2232 kwargs["using"] = self._parse_wrapped_id_vars() -2233 -2234 return self.expression(exp.Join, **kwargs) -2235 -2236 def _parse_index( -2237 self, -2238 index: t.Optional[exp.Expression] = None, -2239 ) -> t.Optional[exp.Expression]: -2240 if index: -2241 unique = None -2242 primary = None -2243 amp = None -2244 -2245 self._match(TokenType.ON) -2246 self._match(TokenType.TABLE) # hive -2247 table = self._parse_table_parts(schema=True) -2248 else: -2249 unique = self._match(TokenType.UNIQUE) -2250 primary = self._match_text_seq("PRIMARY") -2251 amp = self._match_text_seq("AMP") -2252 if not self._match(TokenType.INDEX): -2253 return None -2254 index = self._parse_id_var() -2255 table = None -2256 -2257 if self._match(TokenType.L_PAREN, advance=False): -2258 columns = self._parse_wrapped_csv(self._parse_ordered) -2259 else: -2260 columns = None -2261 -2262 return self.expression( -2263 exp.Index, -2264 this=index, -2265 table=table, -2266 columns=columns, -2267 unique=unique, -2268 primary=primary, -2269 amp=amp, -2270 partition_by=self._parse_partition_by(), -2271 ) -2272 -2273 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: -2274 return ( -2275 (not schema and self._parse_function()) -2276 or self._parse_id_var(any_token=False) -2277 or self._parse_string_as_identifier() -2278 or self._parse_placeholder() -2279 ) -2280 -2281 def _parse_table_parts(self, schema: bool = False) -> exp.Table: -2282 catalog = None -2283 db = None -2284 table = self._parse_table_part(schema=schema) -2285 -2286 while self._match(TokenType.DOT): -2287 if catalog: -2288 # This allows nesting the table in arbitrarily many dot expressions if needed -2289 table = self.expression( -2290 exp.Dot, this=table, expression=self._parse_table_part(schema=schema) -2291 ) -2292 else: -2293 catalog = db -2294 db = table -2295 table = self._parse_table_part(schema=schema) -2296 -2297 if not table: -2298 self.raise_error(f"Expected table name but got {self._curr}") -2299 -2300 return self.expression( -2301 exp.Table, this=table, db=db, catalog=catalog, pivots=self._parse_pivots() -2302 ) -2303 -2304 def _parse_table( -2305 self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None -2306 ) -> t.Optional[exp.Expression]: -2307 lateral = self._parse_lateral() -2308 if lateral: -2309 return lateral -2310 -2311 unnest = self._parse_unnest() -2312 if unnest: -2313 return unnest -2314 -2315 values = self._parse_derived_table_values() -2316 if values: -2317 return values -2318 -2319 subquery = self._parse_select(table=True) -2320 if subquery: -2321 if not subquery.args.get("pivots"): -2322 subquery.set("pivots", self._parse_pivots()) -2323 return subquery +2229 def _parse_index( +2230 self, +2231 index: t.Optional[exp.Expression] = None, +2232 ) -> t.Optional[exp.Index]: +2233 if index: +2234 unique = None +2235 primary = None +2236 amp = None +2237 +2238 self._match(TokenType.ON) +2239 self._match(TokenType.TABLE) # hive +2240 table = self._parse_table_parts(schema=True) +2241 else: +2242 unique = self._match(TokenType.UNIQUE) +2243 primary = self._match_text_seq("PRIMARY") +2244 amp = self._match_text_seq("AMP") +2245 +2246 if not self._match(TokenType.INDEX): +2247 return None +2248 +2249 index = self._parse_id_var() +2250 table = None +2251 +2252 using = self._parse_field() if 
self._match(TokenType.USING) else None +2253 +2254 if self._match(TokenType.L_PAREN, advance=False): +2255 columns = self._parse_wrapped_csv(self._parse_ordered) +2256 else: +2257 columns = None +2258 +2259 return self.expression( +2260 exp.Index, +2261 this=index, +2262 table=table, +2263 using=using, +2264 columns=columns, +2265 unique=unique, +2266 primary=primary, +2267 amp=amp, +2268 partition_by=self._parse_partition_by(), +2269 ) +2270 +2271 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: +2272 return ( +2273 (not schema and self._parse_function(optional_parens=False)) +2274 or self._parse_id_var(any_token=False) +2275 or self._parse_string_as_identifier() +2276 or self._parse_placeholder() +2277 ) +2278 +2279 def _parse_table_parts(self, schema: bool = False) -> exp.Table: +2280 catalog = None +2281 db = None +2282 table = self._parse_table_part(schema=schema) +2283 +2284 while self._match(TokenType.DOT): +2285 if catalog: +2286 # This allows nesting the table in arbitrarily many dot expressions if needed +2287 table = self.expression( +2288 exp.Dot, this=table, expression=self._parse_table_part(schema=schema) +2289 ) +2290 else: +2291 catalog = db +2292 db = table +2293 table = self._parse_table_part(schema=schema) +2294 +2295 if not table: +2296 self.raise_error(f"Expected table name but got {self._curr}") +2297 +2298 return self.expression( +2299 exp.Table, this=table, db=db, catalog=catalog, pivots=self._parse_pivots() +2300 ) +2301 +2302 def _parse_table( +2303 self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None +2304 ) -> t.Optional[exp.Expression]: +2305 lateral = self._parse_lateral() +2306 if lateral: +2307 return lateral +2308 +2309 unnest = self._parse_unnest() +2310 if unnest: +2311 return unnest +2312 +2313 values = self._parse_derived_table_values() +2314 if values: +2315 return values +2316 +2317 subquery = self._parse_select(table=True) +2318 if subquery: +2319 if not subquery.args.get("pivots"): +2320 subquery.set("pivots", self._parse_pivots()) +2321 return subquery +2322 +2323 this: exp.Expression = self._parse_table_parts(schema=schema) 2324 -2325 this: exp.Expression = self._parse_table_parts(schema=schema) -2326 -2327 if schema: -2328 return self._parse_schema(this=this) -2329 -2330 if self.alias_post_tablesample: -2331 table_sample = self._parse_table_sample() -2332 -2333 alias = self._parse_table_alias(alias_tokens=alias_tokens or self.TABLE_ALIAS_TOKENS) -2334 if alias: -2335 this.set("alias", alias) -2336 -2337 if not this.args.get("pivots"): -2338 this.set("pivots", self._parse_pivots()) -2339 -2340 if self._match_pair(TokenType.WITH, TokenType.L_PAREN): -2341 this.set( -2342 "hints", -2343 self._parse_csv(lambda: self._parse_function() or self._parse_var(any_token=True)), -2344 ) -2345 self._match_r_paren() -2346 -2347 if not self.alias_post_tablesample: -2348 table_sample = self._parse_table_sample() -2349 -2350 if table_sample: -2351 table_sample.set("this", this) -2352 this = table_sample +2325 if schema: +2326 return self._parse_schema(this=this) +2327 +2328 if self.ALIAS_POST_TABLESAMPLE: +2329 table_sample = self._parse_table_sample() +2330 +2331 alias = self._parse_table_alias(alias_tokens=alias_tokens or self.TABLE_ALIAS_TOKENS) +2332 if alias: +2333 this.set("alias", alias) +2334 +2335 if not this.args.get("pivots"): +2336 this.set("pivots", self._parse_pivots()) +2337 +2338 if self._match_pair(TokenType.WITH, TokenType.L_PAREN): +2339 this.set( +2340 "hints", +2341 
self._parse_csv(lambda: self._parse_function() or self._parse_var(any_token=True)), +2342 ) +2343 self._match_r_paren() +2344 +2345 if not self.ALIAS_POST_TABLESAMPLE: +2346 table_sample = self._parse_table_sample() +2347 +2348 if table_sample: +2349 table_sample.set("this", this) +2350 this = table_sample +2351 +2352 return this 2353 -2354 return this -2355 -2356 def _parse_unnest(self) -> t.Optional[exp.Expression]: -2357 if not self._match(TokenType.UNNEST): -2358 return None -2359 -2360 expressions = self._parse_wrapped_csv(self._parse_type) -2361 ordinality = self._match_pair(TokenType.WITH, TokenType.ORDINALITY) -2362 alias = self._parse_table_alias() -2363 -2364 if alias and self.unnest_column_only: -2365 if alias.args.get("columns"): -2366 self.raise_error("Unexpected extra column alias in unnest.") +2354 def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]: +2355 if not self._match(TokenType.UNNEST): +2356 return None +2357 +2358 expressions = self._parse_wrapped_csv(self._parse_type) +2359 ordinality = self._match_pair(TokenType.WITH, TokenType.ORDINALITY) +2360 +2361 alias = self._parse_table_alias() if with_alias else None +2362 +2363 if alias and self.UNNEST_COLUMN_ONLY: +2364 if alias.args.get("columns"): +2365 self.raise_error("Unexpected extra column alias in unnest.") +2366 2367 alias.set("columns", [alias.this]) 2368 alias.set("this", None) 2369 2370 offset = None 2371 if self._match_pair(TokenType.WITH, TokenType.OFFSET): 2372 self._match(TokenType.ALIAS) -2373 offset = self._parse_id_var() or exp.Identifier(this="offset") +2373 offset = self._parse_id_var() or exp.to_identifier("offset") 2374 2375 return self.expression( -2376 exp.Unnest, -2377 expressions=expressions, -2378 ordinality=ordinality, -2379 alias=alias, -2380 offset=offset, -2381 ) -2382 -2383 def _parse_derived_table_values(self) -> t.Optional[exp.Expression]: -2384 is_derived = self._match_pair(TokenType.L_PAREN, TokenType.VALUES) -2385 if not is_derived and not self._match(TokenType.VALUES): -2386 return None -2387 -2388 expressions = self._parse_csv(self._parse_value) +2376 exp.Unnest, expressions=expressions, ordinality=ordinality, alias=alias, offset=offset +2377 ) +2378 +2379 def _parse_derived_table_values(self) -> t.Optional[exp.Values]: +2380 is_derived = self._match_pair(TokenType.L_PAREN, TokenType.VALUES) +2381 if not is_derived and not self._match(TokenType.VALUES): +2382 return None +2383 +2384 expressions = self._parse_csv(self._parse_value) +2385 alias = self._parse_table_alias() +2386 +2387 if is_derived: +2388 self._match_r_paren() 2389 -2390 if is_derived: -2391 self._match_r_paren() -2392 -2393 return self.expression(exp.Values, expressions=expressions, alias=self._parse_table_alias()) -2394 -2395 def _parse_table_sample(self, as_modifier: bool = False) -> t.Optional[exp.Expression]: -2396 if not self._match(TokenType.TABLE_SAMPLE) and not ( -2397 as_modifier and self._match_text_seq("USING", "SAMPLE") -2398 ): -2399 return None -2400 -2401 bucket_numerator = None -2402 bucket_denominator = None -2403 bucket_field = None -2404 percent = None -2405 rows = None -2406 size = None -2407 seed = None -2408 -2409 kind = ( -2410 self._prev.text if self._prev.token_type == TokenType.TABLE_SAMPLE else "USING SAMPLE" -2411 ) -2412 method = self._parse_var(tokens=(TokenType.ROW,)) -2413 -2414 self._match(TokenType.L_PAREN) -2415 -2416 num = self._parse_number() -2417 -2418 if self._match_text_seq("BUCKET"): -2419 bucket_numerator = self._parse_number() -2420 
self._match_text_seq("OUT", "OF") -2421 bucket_denominator = bucket_denominator = self._parse_number() -2422 self._match(TokenType.ON) -2423 bucket_field = self._parse_field() -2424 elif self._match_set((TokenType.PERCENT, TokenType.MOD)): -2425 percent = num -2426 elif self._match(TokenType.ROWS): -2427 rows = num -2428 else: -2429 size = num -2430 -2431 self._match(TokenType.R_PAREN) -2432 -2433 if self._match(TokenType.L_PAREN): -2434 method = self._parse_var() -2435 seed = self._match(TokenType.COMMA) and self._parse_number() -2436 self._match_r_paren() -2437 elif self._match_texts(("SEED", "REPEATABLE")): -2438 seed = self._parse_wrapped(self._parse_number) -2439 -2440 return self.expression( -2441 exp.TableSample, -2442 method=method, -2443 bucket_numerator=bucket_numerator, -2444 bucket_denominator=bucket_denominator, -2445 bucket_field=bucket_field, -2446 percent=percent, -2447 rows=rows, -2448 size=size, -2449 seed=seed, -2450 kind=kind, -2451 ) -2452 -2453 def _parse_pivots(self) -> t.List[t.Optional[exp.Expression]]: -2454 return list(iter(self._parse_pivot, None)) -2455 -2456 # https://duckdb.org/docs/sql/statements/pivot -2457 def _parse_simplified_pivot(self) -> exp.Pivot: -2458 def _parse_on() -> t.Optional[exp.Expression]: -2459 this = self._parse_bitwise() -2460 return self._parse_in(this) if self._match(TokenType.IN) else this -2461 -2462 this = self._parse_table() -2463 expressions = self._match(TokenType.ON) and self._parse_csv(_parse_on) -2464 using = self._match(TokenType.USING) and self._parse_csv( -2465 lambda: self._parse_alias(self._parse_function()) -2466 ) -2467 group = self._parse_group() -2468 return self.expression( -2469 exp.Pivot, this=this, expressions=expressions, using=using, group=group -2470 ) -2471 -2472 def _parse_pivot(self) -> t.Optional[exp.Expression]: -2473 index = self._index -2474 -2475 if self._match(TokenType.PIVOT): -2476 unpivot = False -2477 elif self._match(TokenType.UNPIVOT): -2478 unpivot = True -2479 else: -2480 return None -2481 -2482 expressions = [] -2483 field = None -2484 -2485 if not self._match(TokenType.L_PAREN): -2486 self._retreat(index) -2487 return None -2488 -2489 if unpivot: -2490 expressions = self._parse_csv(self._parse_column) -2491 else: -2492 expressions = self._parse_csv(lambda: self._parse_alias(self._parse_function())) -2493 -2494 if not expressions: -2495 self.raise_error("Failed to parse PIVOT's aggregation list") -2496 -2497 if not self._match(TokenType.FOR): -2498 self.raise_error("Expecting FOR") -2499 -2500 value = self._parse_column() -2501 -2502 if not self._match(TokenType.IN): -2503 self.raise_error("Expecting IN") -2504 -2505 field = self._parse_in(value, alias=True) -2506 -2507 self._match_r_paren() -2508 -2509 pivot = self.expression(exp.Pivot, expressions=expressions, field=field, unpivot=unpivot) -2510 -2511 if not self._match_set((TokenType.PIVOT, TokenType.UNPIVOT), advance=False): -2512 pivot.set("alias", self._parse_table_alias()) -2513 -2514 if not unpivot: -2515 names = self._pivot_column_names(t.cast(t.List[exp.Expression], expressions)) -2516 -2517 columns: t.List[exp.Expression] = [] -2518 for fld in pivot.args["field"].expressions: -2519 field_name = fld.sql() if self.IDENTIFY_PIVOT_STRINGS else fld.alias_or_name -2520 for name in names: -2521 if self.PREFIXED_PIVOT_COLUMNS: -2522 name = f"{name}_{field_name}" if name else field_name -2523 else: -2524 name = f"{field_name}_{name}" if name else field_name -2525 -2526 columns.append(exp.to_identifier(name)) -2527 -2528 pivot.set("columns", 
columns) -2529 -2530 return pivot -2531 -2532 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: -2533 return [agg.alias for agg in aggregations] -2534 -2535 def _parse_where(self, skip_where_token: bool = False) -> t.Optional[exp.Expression]: -2536 if not skip_where_token and not self._match(TokenType.WHERE): -2537 return None -2538 -2539 return self.expression( -2540 exp.Where, comments=self._prev_comments, this=self._parse_conjunction() -2541 ) -2542 -2543 def _parse_group(self, skip_group_by_token: bool = False) -> t.Optional[exp.Expression]: -2544 if not skip_group_by_token and not self._match(TokenType.GROUP_BY): -2545 return None -2546 -2547 elements = defaultdict(list) -2548 -2549 while True: -2550 expressions = self._parse_csv(self._parse_conjunction) -2551 if expressions: -2552 elements["expressions"].extend(expressions) -2553 -2554 grouping_sets = self._parse_grouping_sets() -2555 if grouping_sets: -2556 elements["grouping_sets"].extend(grouping_sets) -2557 -2558 rollup = None -2559 cube = None -2560 totals = None -2561 -2562 with_ = self._match(TokenType.WITH) -2563 if self._match(TokenType.ROLLUP): -2564 rollup = with_ or self._parse_wrapped_csv(self._parse_column) -2565 elements["rollup"].extend(ensure_list(rollup)) -2566 -2567 if self._match(TokenType.CUBE): -2568 cube = with_ or self._parse_wrapped_csv(self._parse_column) -2569 elements["cube"].extend(ensure_list(cube)) -2570 -2571 if self._match_text_seq("TOTALS"): -2572 totals = True -2573 elements["totals"] = True # type: ignore -2574 -2575 if not (grouping_sets or rollup or cube or totals): -2576 break -2577 -2578 return self.expression(exp.Group, **elements) # type: ignore -2579 -2580 def _parse_grouping_sets(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]: -2581 if not self._match(TokenType.GROUPING_SETS): -2582 return None -2583 -2584 return self._parse_wrapped_csv(self._parse_grouping_set) -2585 -2586 def _parse_grouping_set(self) -> t.Optional[exp.Expression]: -2587 if self._match(TokenType.L_PAREN): -2588 grouping_set = self._parse_csv(self._parse_column) -2589 self._match_r_paren() -2590 return self.expression(exp.Tuple, expressions=grouping_set) -2591 -2592 return self._parse_column() -2593 -2594 def _parse_having(self, skip_having_token: bool = False) -> t.Optional[exp.Expression]: -2595 if not skip_having_token and not self._match(TokenType.HAVING): -2596 return None -2597 return self.expression(exp.Having, this=self._parse_conjunction()) -2598 -2599 def _parse_qualify(self) -> t.Optional[exp.Expression]: -2600 if not self._match(TokenType.QUALIFY): -2601 return None -2602 return self.expression(exp.Qualify, this=self._parse_conjunction()) -2603 -2604 def _parse_order( -2605 self, this: t.Optional[exp.Expression] = None, skip_order_token: bool = False -2606 ) -> t.Optional[exp.Expression]: -2607 if not skip_order_token and not self._match(TokenType.ORDER_BY): -2608 return this -2609 -2610 return self.expression( -2611 exp.Order, this=this, expressions=self._parse_csv(self._parse_ordered) -2612 ) -2613 -2614 def _parse_sort( -2615 self, exp_class: t.Type[exp.Expression], *texts: str -2616 ) -> t.Optional[exp.Expression]: -2617 if not self._match_text_seq(*texts): -2618 return None -2619 return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered)) -2620 -2621 def _parse_ordered(self) -> exp.Expression: -2622 this = self._parse_conjunction() -2623 self._match(TokenType.ASC) -2624 is_desc = self._match(TokenType.DESC) -2625 is_nulls_first = 
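Note: _parse_group above accumulates plain GROUP BY keys, GROUPING SETS, ROLLUP, CUBE and ClickHouse-style WITH TOTALS into one exp.Group node. A small sketch (illustrative names; the repr of the parsed sets may differ between versions):

    import sqlglot
    from sqlglot import exp

    group = sqlglot.parse_one(
        "SELECT a, b, SUM(c) FROM t GROUP BY GROUPING SETS ((a, b), (a))"
    ).find(exp.Group)

    # Plain keys land in "expressions"; GROUPING SETS / ROLLUP / CUBE / TOTALS are
    # collected into their own args on the same Group node.
    print(group.args.get("grouping_sets"), group.args.get("expressions"))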
self._match_text_seq("NULLS", "FIRST") -2626 is_nulls_last = self._match_text_seq("NULLS", "LAST") -2627 desc = is_desc or False -2628 asc = not desc -2629 nulls_first = is_nulls_first or False -2630 explicitly_null_ordered = is_nulls_first or is_nulls_last -2631 if ( -2632 not explicitly_null_ordered -2633 and ( -2634 (asc and self.null_ordering == "nulls_are_small") -2635 or (desc and self.null_ordering != "nulls_are_small") -2636 ) -2637 and self.null_ordering != "nulls_are_last" -2638 ): -2639 nulls_first = True -2640 -2641 return self.expression(exp.Ordered, this=this, desc=desc, nulls_first=nulls_first) -2642 -2643 def _parse_limit( -2644 self, this: t.Optional[exp.Expression] = None, top: bool = False -2645 ) -> t.Optional[exp.Expression]: -2646 if self._match(TokenType.TOP if top else TokenType.LIMIT): -2647 limit_paren = self._match(TokenType.L_PAREN) -2648 limit_exp = self.expression( -2649 exp.Limit, this=this, expression=self._parse_number() if top else self._parse_term() -2650 ) -2651 -2652 if limit_paren: -2653 self._match_r_paren() +2390 return self.expression( +2391 exp.Values, expressions=expressions, alias=alias or self._parse_table_alias() +2392 ) +2393 +2394 def _parse_table_sample(self, as_modifier: bool = False) -> t.Optional[exp.TableSample]: +2395 if not self._match(TokenType.TABLE_SAMPLE) and not ( +2396 as_modifier and self._match_text_seq("USING", "SAMPLE") +2397 ): +2398 return None +2399 +2400 bucket_numerator = None +2401 bucket_denominator = None +2402 bucket_field = None +2403 percent = None +2404 rows = None +2405 size = None +2406 seed = None +2407 +2408 kind = ( +2409 self._prev.text if self._prev.token_type == TokenType.TABLE_SAMPLE else "USING SAMPLE" +2410 ) +2411 method = self._parse_var(tokens=(TokenType.ROW,)) +2412 +2413 self._match(TokenType.L_PAREN) +2414 +2415 num = self._parse_number() +2416 +2417 if self._match_text_seq("BUCKET"): +2418 bucket_numerator = self._parse_number() +2419 self._match_text_seq("OUT", "OF") +2420 bucket_denominator = bucket_denominator = self._parse_number() +2421 self._match(TokenType.ON) +2422 bucket_field = self._parse_field() +2423 elif self._match_set((TokenType.PERCENT, TokenType.MOD)): +2424 percent = num +2425 elif self._match(TokenType.ROWS): +2426 rows = num +2427 else: +2428 size = num +2429 +2430 self._match(TokenType.R_PAREN) +2431 +2432 if self._match(TokenType.L_PAREN): +2433 method = self._parse_var() +2434 seed = self._match(TokenType.COMMA) and self._parse_number() +2435 self._match_r_paren() +2436 elif self._match_texts(("SEED", "REPEATABLE")): +2437 seed = self._parse_wrapped(self._parse_number) +2438 +2439 return self.expression( +2440 exp.TableSample, +2441 method=method, +2442 bucket_numerator=bucket_numerator, +2443 bucket_denominator=bucket_denominator, +2444 bucket_field=bucket_field, +2445 percent=percent, +2446 rows=rows, +2447 size=size, +2448 seed=seed, +2449 kind=kind, +2450 ) +2451 +2452 def _parse_pivots(self) -> t.List[t.Optional[exp.Expression]]: +2453 return list(iter(self._parse_pivot, None)) +2454 +2455 # https://duckdb.org/docs/sql/statements/pivot +2456 def _parse_simplified_pivot(self) -> exp.Pivot: +2457 def _parse_on() -> t.Optional[exp.Expression]: +2458 this = self._parse_bitwise() +2459 return self._parse_in(this) if self._match(TokenType.IN) else this +2460 +2461 this = self._parse_table() +2462 expressions = self._match(TokenType.ON) and self._parse_csv(_parse_on) +2463 using = self._match(TokenType.USING) and self._parse_csv( +2464 lambda: 
self._parse_alias(self._parse_function()) +2465 ) +2466 group = self._parse_group() +2467 return self.expression( +2468 exp.Pivot, this=this, expressions=expressions, using=using, group=group +2469 ) +2470 +2471 def _parse_pivot(self) -> t.Optional[exp.Pivot]: +2472 index = self._index +2473 +2474 if self._match(TokenType.PIVOT): +2475 unpivot = False +2476 elif self._match(TokenType.UNPIVOT): +2477 unpivot = True +2478 else: +2479 return None +2480 +2481 expressions = [] +2482 field = None +2483 +2484 if not self._match(TokenType.L_PAREN): +2485 self._retreat(index) +2486 return None +2487 +2488 if unpivot: +2489 expressions = self._parse_csv(self._parse_column) +2490 else: +2491 expressions = self._parse_csv(lambda: self._parse_alias(self._parse_function())) +2492 +2493 if not expressions: +2494 self.raise_error("Failed to parse PIVOT's aggregation list") +2495 +2496 if not self._match(TokenType.FOR): +2497 self.raise_error("Expecting FOR") +2498 +2499 value = self._parse_column() +2500 +2501 if not self._match(TokenType.IN): +2502 self.raise_error("Expecting IN") +2503 +2504 field = self._parse_in(value, alias=True) +2505 +2506 self._match_r_paren() +2507 +2508 pivot = self.expression(exp.Pivot, expressions=expressions, field=field, unpivot=unpivot) +2509 +2510 if not self._match_set((TokenType.PIVOT, TokenType.UNPIVOT), advance=False): +2511 pivot.set("alias", self._parse_table_alias()) +2512 +2513 if not unpivot: +2514 names = self._pivot_column_names(t.cast(t.List[exp.Expression], expressions)) +2515 +2516 columns: t.List[exp.Expression] = [] +2517 for fld in pivot.args["field"].expressions: +2518 field_name = fld.sql() if self.IDENTIFY_PIVOT_STRINGS else fld.alias_or_name +2519 for name in names: +2520 if self.PREFIXED_PIVOT_COLUMNS: +2521 name = f"{name}_{field_name}" if name else field_name +2522 else: +2523 name = f"{field_name}_{name}" if name else field_name +2524 +2525 columns.append(exp.to_identifier(name)) +2526 +2527 pivot.set("columns", columns) +2528 +2529 return pivot +2530 +2531 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: +2532 return [agg.alias for agg in aggregations] +2533 +2534 def _parse_where(self, skip_where_token: bool = False) -> t.Optional[exp.Where]: +2535 if not skip_where_token and not self._match(TokenType.WHERE): +2536 return None +2537 +2538 return self.expression( +2539 exp.Where, comments=self._prev_comments, this=self._parse_conjunction() +2540 ) +2541 +2542 def _parse_group(self, skip_group_by_token: bool = False) -> t.Optional[exp.Group]: +2543 if not skip_group_by_token and not self._match(TokenType.GROUP_BY): +2544 return None +2545 +2546 elements = defaultdict(list) +2547 +2548 while True: +2549 expressions = self._parse_csv(self._parse_conjunction) +2550 if expressions: +2551 elements["expressions"].extend(expressions) +2552 +2553 grouping_sets = self._parse_grouping_sets() +2554 if grouping_sets: +2555 elements["grouping_sets"].extend(grouping_sets) +2556 +2557 rollup = None +2558 cube = None +2559 totals = None +2560 +2561 with_ = self._match(TokenType.WITH) +2562 if self._match(TokenType.ROLLUP): +2563 rollup = with_ or self._parse_wrapped_csv(self._parse_column) +2564 elements["rollup"].extend(ensure_list(rollup)) +2565 +2566 if self._match(TokenType.CUBE): +2567 cube = with_ or self._parse_wrapped_csv(self._parse_column) +2568 elements["cube"].extend(ensure_list(cube)) +2569 +2570 if self._match_text_seq("TOTALS"): +2571 totals = True +2572 elements["totals"] = True # type: ignore +2573 +2574 if not 
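Note: the UNPIVOT branch of _parse_pivot reuses the same exp.Pivot node with unpivot=True and plain column expressions instead of aggregations, so no output column names are derived for it. A sketch (illustrative names; Snowflake is chosen only because it supports this syntax):

    import sqlglot
    from sqlglot import exp

    unpivot = sqlglot.parse_one(
        "SELECT * FROM monthly_sales UNPIVOT (sales FOR month IN (jan, feb, mar))",
        read="snowflake",
    ).find(exp.Pivot)

    # unpivot=True marks the node; expressions holds the unpivoted value columns.
    print(unpivot.args.get("unpivot"), unpivot.args["expressions"])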
(grouping_sets or rollup or cube or totals): +2575 break +2576 +2577 return self.expression(exp.Group, **elements) # type: ignore +2578 +2579 def _parse_grouping_sets(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]: +2580 if not self._match(TokenType.GROUPING_SETS): +2581 return None +2582 +2583 return self._parse_wrapped_csv(self._parse_grouping_set) +2584 +2585 def _parse_grouping_set(self) -> t.Optional[exp.Expression]: +2586 if self._match(TokenType.L_PAREN): +2587 grouping_set = self._parse_csv(self._parse_column) +2588 self._match_r_paren() +2589 return self.expression(exp.Tuple, expressions=grouping_set) +2590 +2591 return self._parse_column() +2592 +2593 def _parse_having(self, skip_having_token: bool = False) -> t.Optional[exp.Having]: +2594 if not skip_having_token and not self._match(TokenType.HAVING): +2595 return None +2596 return self.expression(exp.Having, this=self._parse_conjunction()) +2597 +2598 def _parse_qualify(self) -> t.Optional[exp.Qualify]: +2599 if not self._match(TokenType.QUALIFY): +2600 return None +2601 return self.expression(exp.Qualify, this=self._parse_conjunction()) +2602 +2603 def _parse_order( +2604 self, this: t.Optional[exp.Expression] = None, skip_order_token: bool = False +2605 ) -> t.Optional[exp.Expression]: +2606 if not skip_order_token and not self._match(TokenType.ORDER_BY): +2607 return this +2608 +2609 return self.expression( +2610 exp.Order, this=this, expressions=self._parse_csv(self._parse_ordered) +2611 ) +2612 +2613 def _parse_sort(self, exp_class: t.Type[E], *texts: str) -> t.Optional[E]: +2614 if not self._match_text_seq(*texts): +2615 return None +2616 return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered)) +2617 +2618 def _parse_ordered(self) -> exp.Ordered: +2619 this = self._parse_conjunction() +2620 self._match(TokenType.ASC) +2621 +2622 is_desc = self._match(TokenType.DESC) +2623 is_nulls_first = self._match_text_seq("NULLS", "FIRST") +2624 is_nulls_last = self._match_text_seq("NULLS", "LAST") +2625 desc = is_desc or False +2626 asc = not desc +2627 nulls_first = is_nulls_first or False +2628 explicitly_null_ordered = is_nulls_first or is_nulls_last +2629 +2630 if ( +2631 not explicitly_null_ordered +2632 and ( +2633 (asc and self.NULL_ORDERING == "nulls_are_small") +2634 or (desc and self.NULL_ORDERING != "nulls_are_small") +2635 ) +2636 and self.NULL_ORDERING != "nulls_are_last" +2637 ): +2638 nulls_first = True +2639 +2640 return self.expression(exp.Ordered, this=this, desc=desc, nulls_first=nulls_first) +2641 +2642 def _parse_limit( +2643 self, this: t.Optional[exp.Expression] = None, top: bool = False +2644 ) -> t.Optional[exp.Expression]: +2645 if self._match(TokenType.TOP if top else TokenType.LIMIT): +2646 limit_paren = self._match(TokenType.L_PAREN) +2647 expression = self._parse_number() if top else self._parse_term() +2648 +2649 if self._match(TokenType.COMMA): +2650 offset = expression +2651 expression = self._parse_term() +2652 else: +2653 offset = None 2654 -2655 return limit_exp +2655 limit_exp = self.expression(exp.Limit, this=this, expression=expression, offset=offset) 2656 -2657 if self._match(TokenType.FETCH): -2658 direction = self._match_set((TokenType.FIRST, TokenType.NEXT)) -2659 direction = self._prev.text if direction else "FIRST" -2660 -2661 count = self._parse_number() -2662 percent = self._match(TokenType.PERCENT) -2663 -2664 self._match_set((TokenType.ROW, TokenType.ROWS)) +2657 if limit_paren: +2658 self._match_r_paren() +2659 +2660 return limit_exp +2661 +2662 if 
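Note: the new _parse_limit additionally splits MySQL's LIMIT <offset>, <count> form into separate expression/offset args on exp.Limit, which lets other generators spell it as LIMIT ... OFFSET .... A quick sketch (the output shown in the comment is indicative, not guaranteed verbatim):

    import sqlglot

    print(sqlglot.transpile("SELECT * FROM t LIMIT 5, 10", read="mysql", write="postgres")[0])
    # e.g. SELECT * FROM t LIMIT 10 OFFSET 5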
self._match(TokenType.FETCH): +2663 direction = self._match_set((TokenType.FIRST, TokenType.NEXT)) +2664 direction = self._prev.text if direction else "FIRST" 2665 -2666 only = self._match_text_seq("ONLY") -2667 with_ties = self._match_text_seq("WITH", "TIES") +2666 count = self._parse_number() +2667 percent = self._match(TokenType.PERCENT) 2668 -2669 if only and with_ties: -2670 self.raise_error("Cannot specify both ONLY and WITH TIES in FETCH clause") -2671 -2672 return self.expression( -2673 exp.Fetch, -2674 direction=direction, -2675 count=count, -2676 percent=percent, -2677 with_ties=with_ties, -2678 ) -2679 -2680 return this -2681 -2682 def _parse_offset(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]: -2683 if not self._match_set((TokenType.OFFSET, TokenType.COMMA)): -2684 return this -2685 -2686 count = self._parse_number() -2687 self._match_set((TokenType.ROW, TokenType.ROWS)) -2688 return self.expression(exp.Offset, this=this, expression=count) -2689 -2690 def _parse_locks(self) -> t.List[exp.Expression]: -2691 # Lists are invariant, so we need to use a type hint here -2692 locks: t.List[exp.Expression] = [] -2693 -2694 while True: -2695 if self._match_text_seq("FOR", "UPDATE"): -2696 update = True -2697 elif self._match_text_seq("FOR", "SHARE") or self._match_text_seq( -2698 "LOCK", "IN", "SHARE", "MODE" -2699 ): -2700 update = False -2701 else: -2702 break -2703 -2704 expressions = None -2705 if self._match_text_seq("OF"): -2706 expressions = self._parse_csv(lambda: self._parse_table(schema=True)) -2707 -2708 wait: t.Optional[bool | exp.Expression] = None -2709 if self._match_text_seq("NOWAIT"): -2710 wait = True -2711 elif self._match_text_seq("WAIT"): -2712 wait = self._parse_primary() -2713 elif self._match_text_seq("SKIP", "LOCKED"): -2714 wait = False -2715 -2716 locks.append( -2717 self.expression(exp.Lock, update=update, expressions=expressions, wait=wait) -2718 ) -2719 -2720 return locks -2721 -2722 def _parse_set_operations(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: -2723 if not self._match_set(self.SET_OPERATIONS): -2724 return this -2725 -2726 token_type = self._prev.token_type -2727 -2728 if token_type == TokenType.UNION: -2729 expression = exp.Union -2730 elif token_type == TokenType.EXCEPT: -2731 expression = exp.Except -2732 else: -2733 expression = exp.Intersect -2734 -2735 return self.expression( -2736 expression, -2737 this=this, -2738 distinct=self._match(TokenType.DISTINCT) or not self._match(TokenType.ALL), -2739 expression=self._parse_set_operations(self._parse_select(nested=True)), -2740 ) -2741 -2742 def _parse_expression(self) -> t.Optional[exp.Expression]: -2743 return self._parse_alias(self._parse_conjunction()) +2669 self._match_set((TokenType.ROW, TokenType.ROWS)) +2670 +2671 only = self._match_text_seq("ONLY") +2672 with_ties = self._match_text_seq("WITH", "TIES") +2673 +2674 if only and with_ties: +2675 self.raise_error("Cannot specify both ONLY and WITH TIES in FETCH clause") +2676 +2677 return self.expression( +2678 exp.Fetch, +2679 direction=direction, +2680 count=count, +2681 percent=percent, +2682 with_ties=with_ties, +2683 ) +2684 +2685 return this +2686 +2687 def _parse_offset(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]: +2688 if not self._match(TokenType.OFFSET): +2689 return this +2690 +2691 count = self._parse_number() +2692 self._match_set((TokenType.ROW, TokenType.ROWS)) +2693 return self.expression(exp.Offset, this=this, expression=count) +2694 
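Note: the FETCH branch stores direction, count and the PERCENT / WITH TIES flags on an exp.Fetch node, and raises when ONLY is combined with WITH TIES. A minimal sketch (illustrative table name):

    import sqlglot
    from sqlglot import exp

    fetch = sqlglot.parse_one(
        "SELECT * FROM t ORDER BY id FETCH FIRST 5 ROWS ONLY"
    ).find(exp.Fetch)

    # direction comes from the matched FIRST/NEXT token; count is the parsed number.
    print(fetch.args.get("direction"), fetch.args.get("count"))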
+2695 def _parse_locks(self) -> t.List[exp.Lock]: +2696 locks = [] +2697 while True: +2698 if self._match_text_seq("FOR", "UPDATE"): +2699 update = True +2700 elif self._match_text_seq("FOR", "SHARE") or self._match_text_seq( +2701 "LOCK", "IN", "SHARE", "MODE" +2702 ): +2703 update = False +2704 else: +2705 break +2706 +2707 expressions = None +2708 if self._match_text_seq("OF"): +2709 expressions = self._parse_csv(lambda: self._parse_table(schema=True)) +2710 +2711 wait: t.Optional[bool | exp.Expression] = None +2712 if self._match_text_seq("NOWAIT"): +2713 wait = True +2714 elif self._match_text_seq("WAIT"): +2715 wait = self._parse_primary() +2716 elif self._match_text_seq("SKIP", "LOCKED"): +2717 wait = False +2718 +2719 locks.append( +2720 self.expression(exp.Lock, update=update, expressions=expressions, wait=wait) +2721 ) +2722 +2723 return locks +2724 +2725 def _parse_set_operations(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: +2726 if not self._match_set(self.SET_OPERATIONS): +2727 return this +2728 +2729 token_type = self._prev.token_type +2730 +2731 if token_type == TokenType.UNION: +2732 expression = exp.Union +2733 elif token_type == TokenType.EXCEPT: +2734 expression = exp.Except +2735 else: +2736 expression = exp.Intersect +2737 +2738 return self.expression( +2739 expression, +2740 this=this, +2741 distinct=self._match(TokenType.DISTINCT) or not self._match(TokenType.ALL), +2742 expression=self._parse_set_operations(self._parse_select(nested=True)), +2743 ) 2744 -2745 def _parse_conjunction(self) -> t.Optional[exp.Expression]: -2746 return self._parse_tokens(self._parse_equality, self.CONJUNCTION) +2745 def _parse_expression(self) -> t.Optional[exp.Expression]: +2746 return self._parse_alias(self._parse_conjunction()) 2747 -2748 def _parse_equality(self) -> t.Optional[exp.Expression]: -2749 return self._parse_tokens(self._parse_comparison, self.EQUALITY) +2748 def _parse_conjunction(self) -> t.Optional[exp.Expression]: +2749 return self._parse_tokens(self._parse_equality, self.CONJUNCTION) 2750 -2751 def _parse_comparison(self) -> t.Optional[exp.Expression]: -2752 return self._parse_tokens(self._parse_range, self.COMPARISON) +2751 def _parse_equality(self) -> t.Optional[exp.Expression]: +2752 return self._parse_tokens(self._parse_comparison, self.EQUALITY) 2753 -2754 def _parse_range(self) -> t.Optional[exp.Expression]: -2755 this = self._parse_bitwise() -2756 negate = self._match(TokenType.NOT) -2757 -2758 if self._match_set(self.RANGE_PARSERS): -2759 expression = self.RANGE_PARSERS[self._prev.token_type](self, this) -2760 if not expression: -2761 return this -2762 -2763 this = expression -2764 elif self._match(TokenType.ISNULL): -2765 this = self.expression(exp.Is, this=this, expression=exp.Null()) -2766 -2767 # Postgres supports ISNULL and NOTNULL for conditions. 
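Note: _parse_locks collects row-locking clauses into exp.Lock nodes: update distinguishes FOR UPDATE from FOR SHARE / LOCK IN SHARE MODE, and wait encodes NOWAIT (True), SKIP LOCKED (False) or a parsed WAIT <n> value. Sketch with illustrative names:

    import sqlglot
    from sqlglot import exp

    lock = sqlglot.parse_one(
        "SELECT * FROM jobs WHERE status = 'ready' FOR UPDATE SKIP LOCKED"
    ).find(exp.Lock)

    # update=True for FOR UPDATE; wait=False is how SKIP LOCKED is recorded.
    print(lock.args.get("update"), lock.args.get("wait"))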
-2768 # https://blog.andreiavram.ro/postgresql-null-composite-type/ -2769 if self._match(TokenType.NOTNULL): -2770 this = self.expression(exp.Is, this=this, expression=exp.Null()) -2771 this = self.expression(exp.Not, this=this) -2772 -2773 if negate: +2754 def _parse_comparison(self) -> t.Optional[exp.Expression]: +2755 return self._parse_tokens(self._parse_range, self.COMPARISON) +2756 +2757 def _parse_range(self) -> t.Optional[exp.Expression]: +2758 this = self._parse_bitwise() +2759 negate = self._match(TokenType.NOT) +2760 +2761 if self._match_set(self.RANGE_PARSERS): +2762 expression = self.RANGE_PARSERS[self._prev.token_type](self, this) +2763 if not expression: +2764 return this +2765 +2766 this = expression +2767 elif self._match(TokenType.ISNULL): +2768 this = self.expression(exp.Is, this=this, expression=exp.Null()) +2769 +2770 # Postgres supports ISNULL and NOTNULL for conditions. +2771 # https://blog.andreiavram.ro/postgresql-null-composite-type/ +2772 if self._match(TokenType.NOTNULL): +2773 this = self.expression(exp.Is, this=this, expression=exp.Null()) 2774 this = self.expression(exp.Not, this=this) 2775 -2776 if self._match(TokenType.IS): -2777 this = self._parse_is(this) +2776 if negate: +2777 this = self.expression(exp.Not, this=this) 2778 -2779 return this -2780 -2781 def _parse_is(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: -2782 index = self._index - 1 -2783 negate = self._match(TokenType.NOT) -2784 if self._match_text_seq("DISTINCT", "FROM"): -2785 klass = exp.NullSafeEQ if negate else exp.NullSafeNEQ -2786 return self.expression(klass, this=this, expression=self._parse_expression()) +2779 if self._match(TokenType.IS): +2780 this = self._parse_is(this) +2781 +2782 return this +2783 +2784 def _parse_is(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: +2785 index = self._index - 1 +2786 negate = self._match(TokenType.NOT) 2787 -2788 expression = self._parse_null() or self._parse_boolean() -2789 if not expression: -2790 self._retreat(index) -2791 return None -2792 -2793 this = self.expression(exp.Is, this=this, expression=expression) -2794 return self.expression(exp.Not, this=this) if negate else this -2795 -2796 def _parse_in(self, this: t.Optional[exp.Expression], alias: bool = False) -> exp.In: -2797 unnest = self._parse_unnest() -2798 if unnest: -2799 this = self.expression(exp.In, this=this, unnest=unnest) -2800 elif self._match(TokenType.L_PAREN): -2801 expressions = self._parse_csv(lambda: self._parse_select_or_expression(alias=alias)) -2802 -2803 if len(expressions) == 1 and isinstance(expressions[0], exp.Subqueryable): -2804 this = self.expression(exp.In, this=this, query=expressions[0]) -2805 else: -2806 this = self.expression(exp.In, this=this, expressions=expressions) -2807 -2808 self._match_r_paren(this) -2809 else: -2810 this = self.expression(exp.In, this=this, field=self._parse_field()) +2788 if self._match_text_seq("DISTINCT", "FROM"): +2789 klass = exp.NullSafeEQ if negate else exp.NullSafeNEQ +2790 return self.expression(klass, this=this, expression=self._parse_expression()) +2791 +2792 expression = self._parse_null() or self._parse_boolean() +2793 if not expression: +2794 self._retreat(index) +2795 return None +2796 +2797 this = self.expression(exp.Is, this=this, expression=expression) +2798 return self.expression(exp.Not, this=this) if negate else this +2799 +2800 def _parse_in(self, this: t.Optional[exp.Expression], alias: bool = False) -> exp.In: +2801 unnest = self._parse_unnest(with_alias=False) 
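Note: _parse_range and _parse_is normalize the Postgres ISNULL/NOTNULL shorthands into IS NULL / NOT ... IS NULL and map IS [NOT] DISTINCT FROM onto the null-safe comparison nodes. A hedged sketch (generated SQL may differ slightly by version):

    import sqlglot
    from sqlglot import exp

    print(sqlglot.transpile("SELECT * FROM t WHERE x NOTNULL", read="postgres")[0])
    # e.g. SELECT * FROM t WHERE NOT x IS NULL

    # IS DISTINCT FROM without NOT becomes the null-safe "not equal" node.
    print(repr(sqlglot.parse_one("a IS DISTINCT FROM b").find(exp.NullSafeNEQ)))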
+2802 if unnest: +2803 this = self.expression(exp.In, this=this, unnest=unnest) +2804 elif self._match(TokenType.L_PAREN): +2805 expressions = self._parse_csv(lambda: self._parse_select_or_expression(alias=alias)) +2806 +2807 if len(expressions) == 1 and isinstance(expressions[0], exp.Subqueryable): +2808 this = self.expression(exp.In, this=this, query=expressions[0]) +2809 else: +2810 this = self.expression(exp.In, this=this, expressions=expressions) 2811 -2812 return this -2813 -2814 def _parse_between(self, this: exp.Expression) -> exp.Expression: -2815 low = self._parse_bitwise() -2816 self._match(TokenType.AND) -2817 high = self._parse_bitwise() -2818 return self.expression(exp.Between, this=this, low=low, high=high) -2819 -2820 def _parse_escape(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: -2821 if not self._match(TokenType.ESCAPE): -2822 return this -2823 return self.expression(exp.Escape, this=this, expression=self._parse_string()) -2824 -2825 def _parse_interval(self) -> t.Optional[exp.Expression]: -2826 if not self._match(TokenType.INTERVAL): -2827 return None +2812 self._match_r_paren(this) +2813 else: +2814 this = self.expression(exp.In, this=this, field=self._parse_field()) +2815 +2816 return this +2817 +2818 def _parse_between(self, this: exp.Expression) -> exp.Between: +2819 low = self._parse_bitwise() +2820 self._match(TokenType.AND) +2821 high = self._parse_bitwise() +2822 return self.expression(exp.Between, this=this, low=low, high=high) +2823 +2824 def _parse_escape(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: +2825 if not self._match(TokenType.ESCAPE): +2826 return this +2827 return self.expression(exp.Escape, this=this, expression=self._parse_string()) 2828 -2829 this = self._parse_primary() or self._parse_term() -2830 unit = self._parse_function() or self._parse_var() -2831 -2832 # Most dialects support, e.g., the form INTERVAL '5' day, thus we try to parse -2833 # each INTERVAL expression into this canonical form so it's easy to transpile -2834 if this and this.is_number: -2835 this = exp.Literal.string(this.name) -2836 elif this and this.is_string: -2837 parts = this.name.split() -2838 -2839 if len(parts) == 2: -2840 if unit: -2841 # this is not actually a unit, it's something else -2842 unit = None -2843 self._retreat(self._index - 1) -2844 else: -2845 this = exp.Literal.string(parts[0]) -2846 unit = self.expression(exp.Var, this=parts[1]) -2847 -2848 return self.expression(exp.Interval, this=this, unit=unit) -2849 -2850 def _parse_bitwise(self) -> t.Optional[exp.Expression]: -2851 this = self._parse_term() -2852 -2853 while True: -2854 if self._match_set(self.BITWISE): -2855 this = self.expression( -2856 self.BITWISE[self._prev.token_type], -2857 this=this, -2858 expression=self._parse_term(), -2859 ) -2860 elif self._match_pair(TokenType.LT, TokenType.LT): -2861 this = self.expression( -2862 exp.BitwiseLeftShift, this=this, expression=self._parse_term() -2863 ) -2864 elif self._match_pair(TokenType.GT, TokenType.GT): -2865 this = self.expression( -2866 exp.BitwiseRightShift, this=this, expression=self._parse_term() -2867 ) -2868 else: -2869 break -2870 -2871 return this +2829 def _parse_interval(self) -> t.Optional[exp.Interval]: +2830 if not self._match(TokenType.INTERVAL): +2831 return None +2832 +2833 this = self._parse_primary() or self._parse_term() +2834 unit = self._parse_function() or self._parse_var() +2835 +2836 # Most dialects support, e.g., the form INTERVAL '5' day, thus we try to parse +2837 # each 
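Note: _parse_in stores a single subquery under the query arg and a plain value list under expressions, while _parse_between keeps the low/high bounds as separate args. Minimal sketch:

    import sqlglot
    from sqlglot import exp

    in_expr = sqlglot.parse_one("x IN (SELECT id FROM t)").find(exp.In)
    # A lone subquery lands in "query"; a value list would land in "expressions".
    print(in_expr.args.get("query") is not None, in_expr.args.get("expressions"))

    between = sqlglot.parse_one("x BETWEEN 1 AND 10").find(exp.Between)
    print(between.args["low"], between.args["high"])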
INTERVAL expression into this canonical form so it's easy to transpile +2838 if this and this.is_number: +2839 this = exp.Literal.string(this.name) +2840 elif this and this.is_string: +2841 parts = this.name.split() +2842 +2843 if len(parts) == 2: +2844 if unit: +2845 # this is not actually a unit, it's something else +2846 unit = None +2847 self._retreat(self._index - 1) +2848 else: +2849 this = exp.Literal.string(parts[0]) +2850 unit = self.expression(exp.Var, this=parts[1]) +2851 +2852 return self.expression(exp.Interval, this=this, unit=unit) +2853 +2854 def _parse_bitwise(self) -> t.Optional[exp.Expression]: +2855 this = self._parse_term() +2856 +2857 while True: +2858 if self._match_set(self.BITWISE): +2859 this = self.expression( +2860 self.BITWISE[self._prev.token_type], this=this, expression=self._parse_term() +2861 ) +2862 elif self._match_pair(TokenType.LT, TokenType.LT): +2863 this = self.expression( +2864 exp.BitwiseLeftShift, this=this, expression=self._parse_term() +2865 ) +2866 elif self._match_pair(TokenType.GT, TokenType.GT): +2867 this = self.expression( +2868 exp.BitwiseRightShift, this=this, expression=self._parse_term() +2869 ) +2870 else: +2871 break 2872 -2873 def _parse_term(self) -> t.Optional[exp.Expression]: -2874 return self._parse_tokens(self._parse_factor, self.TERM) -2875 -2876 def _parse_factor(self) -> t.Optional[exp.Expression]: -2877 return self._parse_tokens(self._parse_unary, self.FACTOR) -2878 -2879 def _parse_unary(self) -> t.Optional[exp.Expression]: -2880 if self._match_set(self.UNARY_PARSERS): -2881 return self.UNARY_PARSERS[self._prev.token_type](self) -2882 return self._parse_at_time_zone(self._parse_type()) -2883 -2884 def _parse_type(self) -> t.Optional[exp.Expression]: -2885 interval = self._parse_interval() -2886 if interval: -2887 return interval -2888 -2889 index = self._index -2890 data_type = self._parse_types(check_func=True) -2891 this = self._parse_column() -2892 -2893 if data_type: -2894 if isinstance(this, exp.Literal): -2895 parser = self.TYPE_LITERAL_PARSERS.get(data_type.this) -2896 if parser: -2897 return parser(self, this, data_type) -2898 return self.expression(exp.Cast, this=this, to=data_type) -2899 if not data_type.expressions: -2900 self._retreat(index) -2901 return self._parse_column() -2902 return self._parse_column_ops(data_type) -2903 -2904 return this +2873 return this +2874 +2875 def _parse_term(self) -> t.Optional[exp.Expression]: +2876 return self._parse_tokens(self._parse_factor, self.TERM) +2877 +2878 def _parse_factor(self) -> t.Optional[exp.Expression]: +2879 return self._parse_tokens(self._parse_unary, self.FACTOR) +2880 +2881 def _parse_unary(self) -> t.Optional[exp.Expression]: +2882 if self._match_set(self.UNARY_PARSERS): +2883 return self.UNARY_PARSERS[self._prev.token_type](self) +2884 return self._parse_at_time_zone(self._parse_type()) +2885 +2886 def _parse_type(self) -> t.Optional[exp.Expression]: +2887 interval = self._parse_interval() +2888 if interval: +2889 return interval +2890 +2891 index = self._index +2892 data_type = self._parse_types(check_func=True) +2893 this = self._parse_column() +2894 +2895 if data_type: +2896 if isinstance(this, exp.Literal): +2897 parser = self.TYPE_LITERAL_PARSERS.get(data_type.this) +2898 if parser: +2899 return parser(self, this, data_type) +2900 return self.expression(exp.Cast, this=this, to=data_type) +2901 if not data_type.expressions: +2902 self._retreat(index) +2903 return self._parse_column() +2904 return self._parse_column_ops(data_type) 2905 -2906 def 
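Note: per the comment in the hunk, _parse_interval canonicalizes INTERVAL '5 day' into a string literal plus a separate unit so it transpiles like INTERVAL '5' day, and _parse_bitwise folds << / >> token pairs into shift expressions. Sketch:

    import sqlglot
    from sqlglot import exp

    iv = sqlglot.parse_one("SELECT INTERVAL '5 day'").find(exp.Interval)
    # this is the quantity literal, unit the split-off var ("day").
    print(iv.this, iv.args.get("unit"))

    print(repr(sqlglot.parse_one("SELECT 1 << 2").find(exp.BitwiseLeftShift)))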
_parse_type_size(self) -> t.Optional[exp.Expression]: -2907 this = self._parse_type() -2908 if not this: -2909 return None -2910 -2911 return self.expression( -2912 exp.DataTypeSize, this=this, expression=self._parse_var(any_token=True) -2913 ) -2914 -2915 def _parse_types( -2916 self, check_func: bool = False, schema: bool = False -2917 ) -> t.Optional[exp.Expression]: -2918 index = self._index -2919 -2920 prefix = self._match_text_seq("SYSUDTLIB", ".") +2906 return this +2907 +2908 def _parse_type_size(self) -> t.Optional[exp.DataTypeSize]: +2909 this = self._parse_type() +2910 if not this: +2911 return None +2912 +2913 return self.expression( +2914 exp.DataTypeSize, this=this, expression=self._parse_var(any_token=True) +2915 ) +2916 +2917 def _parse_types( +2918 self, check_func: bool = False, schema: bool = False +2919 ) -> t.Optional[exp.Expression]: +2920 index = self._index 2921 -2922 if not self._match_set(self.TYPE_TOKENS): -2923 return None -2924 -2925 type_token = self._prev.token_type +2922 prefix = self._match_text_seq("SYSUDTLIB", ".") +2923 +2924 if not self._match_set(self.TYPE_TOKENS): +2925 return None 2926 -2927 if type_token == TokenType.PSEUDO_TYPE: -2928 return self.expression(exp.PseudoType, this=self._prev.text) -2929 -2930 nested = type_token in self.NESTED_TYPE_TOKENS -2931 is_struct = type_token == TokenType.STRUCT -2932 expressions = None -2933 maybe_func = False -2934 -2935 if self._match(TokenType.L_PAREN): -2936 if is_struct: -2937 expressions = self._parse_csv(self._parse_struct_types) -2938 elif nested: -2939 expressions = self._parse_csv( -2940 lambda: self._parse_types(check_func=check_func, schema=schema) -2941 ) -2942 else: -2943 expressions = self._parse_csv(self._parse_type_size) -2944 -2945 if not expressions or not self._match(TokenType.R_PAREN): -2946 self._retreat(index) -2947 return None +2927 type_token = self._prev.token_type +2928 +2929 if type_token == TokenType.PSEUDO_TYPE: +2930 return self.expression(exp.PseudoType, this=self._prev.text) +2931 +2932 nested = type_token in self.NESTED_TYPE_TOKENS +2933 is_struct = type_token == TokenType.STRUCT +2934 expressions = None +2935 maybe_func = False +2936 +2937 if self._match(TokenType.L_PAREN): +2938 if is_struct: +2939 expressions = self._parse_csv(self._parse_struct_types) +2940 elif nested: +2941 expressions = self._parse_csv( +2942 lambda: self._parse_types(check_func=check_func, schema=schema) +2943 ) +2944 elif type_token in self.ENUM_TYPE_TOKENS: +2945 expressions = self._parse_csv(self._parse_primary) +2946 else: +2947 expressions = self._parse_csv(self._parse_type_size) 2948 -2949 maybe_func = True -2950 -2951 if self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET): -2952 this = exp.DataType( -2953 this=exp.DataType.Type.ARRAY, -2954 expressions=[exp.DataType.build(type_token.value, expressions=expressions)], -2955 nested=True, -2956 ) -2957 -2958 while self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET): -2959 this = exp.DataType( -2960 this=exp.DataType.Type.ARRAY, -2961 expressions=[this], -2962 nested=True, -2963 ) +2949 if not expressions or not self._match(TokenType.R_PAREN): +2950 self._retreat(index) +2951 return None +2952 +2953 maybe_func = True +2954 +2955 if self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET): +2956 this = exp.DataType( +2957 this=exp.DataType.Type.ARRAY, +2958 expressions=[exp.DataType.build(type_token.value, expressions=expressions)], +2959 nested=True, +2960 ) +2961 +2962 while self._match_pair(TokenType.L_BRACKET, 
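Note: the new ENUM branch in _parse_types parses the values inside ENUM(...) as primaries (string literals) rather than as type sizes. A sketch under the assumption that the MySQL dialect maps ENUM to one of the ENUM_TYPE_TOKENS (illustrative table/column names):

    import sqlglot
    from sqlglot import exp

    col = sqlglot.parse_one(
        "CREATE TABLE t (status ENUM('active', 'inactive'))", read="mysql"
    ).find(exp.ColumnDef)

    # kind is the parsed DataType carrying the enum's string values as expressions.
    print(col.args["kind"])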
TokenType.R_BRACKET): +2963 this = exp.DataType(this=exp.DataType.Type.ARRAY, expressions=[this], nested=True) 2964 2965 return this 2966 @@ -3080,1688 +3080,1694 @@ 2986 2987 value: t.Optional[exp.Expression] = None 2988 if type_token in self.TIMESTAMPS: -2989 if self._match_text_seq("WITH", "TIME", "ZONE") or type_token == TokenType.TIMESTAMPTZ: -2990 value = exp.DataType(this=exp.DataType.Type.TIMESTAMPTZ, expressions=expressions) -2991 elif ( -2992 self._match_text_seq("WITH", "LOCAL", "TIME", "ZONE") -2993 or type_token == TokenType.TIMESTAMPLTZ -2994 ): -2995 value = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions) -2996 elif self._match_text_seq("WITHOUT", "TIME", "ZONE"): -2997 if type_token == TokenType.TIME: -2998 value = exp.DataType(this=exp.DataType.Type.TIME, expressions=expressions) -2999 else: -3000 value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions) -3001 -3002 maybe_func = maybe_func and value is None -3003 -3004 if value is None: -3005 value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions) -3006 elif type_token == TokenType.INTERVAL: -3007 unit = self._parse_var() +2989 if self._match_text_seq("WITH", "TIME", "ZONE"): +2990 maybe_func = False +2991 value = exp.DataType(this=exp.DataType.Type.TIMESTAMPTZ, expressions=expressions) +2992 elif self._match_text_seq("WITH", "LOCAL", "TIME", "ZONE"): +2993 maybe_func = False +2994 value = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions) +2995 elif self._match_text_seq("WITHOUT", "TIME", "ZONE"): +2996 maybe_func = False +2997 elif type_token == TokenType.INTERVAL: +2998 unit = self._parse_var() +2999 +3000 if not unit: +3001 value = self.expression(exp.DataType, this=exp.DataType.Type.INTERVAL) +3002 else: +3003 value = self.expression(exp.Interval, unit=unit) +3004 +3005 if maybe_func and check_func: +3006 index2 = self._index +3007 peek = self._parse_string() 3008 -3009 if not unit: -3010 value = self.expression(exp.DataType, this=exp.DataType.Type.INTERVAL) -3011 else: -3012 value = self.expression(exp.Interval, unit=unit) -3013 -3014 if maybe_func and check_func: -3015 index2 = self._index -3016 peek = self._parse_string() +3009 if not peek: +3010 self._retreat(index) +3011 return None +3012 +3013 self._retreat(index2) +3014 +3015 if value: +3016 return value 3017 -3018 if not peek: -3019 self._retreat(index) -3020 return None -3021 -3022 self._retreat(index2) -3023 -3024 if value: -3025 return value -3026 -3027 return exp.DataType( -3028 this=exp.DataType.Type[type_token.value.upper()], -3029 expressions=expressions, -3030 nested=nested, -3031 values=values, -3032 prefix=prefix, -3033 ) -3034 -3035 def _parse_struct_types(self) -> t.Optional[exp.Expression]: -3036 this = self._parse_type() or self._parse_id_var() -3037 self._match(TokenType.COLON) -3038 return self._parse_column_def(this) -3039 -3040 def _parse_at_time_zone(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: -3041 if not self._match_text_seq("AT", "TIME", "ZONE"): -3042 return this -3043 return self.expression(exp.AtTimeZone, this=this, zone=self._parse_unary()) -3044 -3045 def _parse_column(self) -> t.Optional[exp.Expression]: -3046 this = self._parse_field() -3047 if isinstance(this, exp.Identifier): -3048 this = self.expression(exp.Column, this=this) -3049 elif not this: -3050 return self._parse_bracket(this) -3051 return self._parse_column_ops(this) -3052 -3053 def _parse_column_ops(self, this: exp.Expression) -> exp.Expression: 
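Note: the type parser folds WITH [LOCAL] TIME ZONE into TIMESTAMPTZ / TIMESTAMPLTZ and wraps trailing [] pairs into nested ARRAY data types. Sketch (Postgres chosen for the bracket syntax):

    import sqlglot
    from sqlglot import exp

    # TIMESTAMP WITH TIME ZONE -> a TIMESTAMPTZ DataType on the Cast's "to" arg.
    print(sqlglot.parse_one("SELECT CAST(x AS TIMESTAMP WITH TIME ZONE)").find(exp.Cast).args["to"])

    # INT[][] -> ARRAY(ARRAY(INT)) via the repeated L_BRACKET/R_BRACKET matching.
    print(sqlglot.parse_one("SELECT CAST(x AS INT[][])", read="postgres").find(exp.Cast).args["to"])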
-3054 this = self._parse_bracket(this) -3055 -3056 while self._match_set(self.COLUMN_OPERATORS): -3057 op_token = self._prev.token_type -3058 op = self.COLUMN_OPERATORS.get(op_token) -3059 -3060 if op_token == TokenType.DCOLON: -3061 field = self._parse_types() -3062 if not field: -3063 self.raise_error("Expected type") -3064 elif op and self._curr: -3065 self._advance() -3066 value = self._prev.text -3067 field = ( -3068 exp.Literal.number(value) -3069 if self._prev.token_type == TokenType.NUMBER -3070 else exp.Literal.string(value) -3071 ) -3072 else: -3073 field = self._parse_field(anonymous_func=True) -3074 -3075 if isinstance(field, exp.Func): -3076 # bigquery allows function calls like x.y.count(...) -3077 # SAFE.SUBSTR(...) -3078 # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#function_call_rules -3079 this = self._replace_columns_with_dots(this) -3080 -3081 if op: -3082 this = op(self, this, field) -3083 elif isinstance(this, exp.Column) and not this.args.get("catalog"): -3084 this = self.expression( -3085 exp.Column, -3086 this=field, -3087 table=this.this, -3088 db=this.args.get("table"), -3089 catalog=this.args.get("db"), -3090 ) -3091 else: -3092 this = self.expression(exp.Dot, this=this, expression=field) -3093 this = self._parse_bracket(this) -3094 return this -3095 -3096 def _parse_primary(self) -> t.Optional[exp.Expression]: -3097 if self._match_set(self.PRIMARY_PARSERS): -3098 token_type = self._prev.token_type -3099 primary = self.PRIMARY_PARSERS[token_type](self, self._prev) -3100 -3101 if token_type == TokenType.STRING: -3102 expressions = [primary] -3103 while self._match(TokenType.STRING): -3104 expressions.append(exp.Literal.string(self._prev.text)) -3105 if len(expressions) > 1: -3106 return self.expression(exp.Concat, expressions=expressions) -3107 return primary +3018 return exp.DataType( +3019 this=exp.DataType.Type[type_token.value.upper()], +3020 expressions=expressions, +3021 nested=nested, +3022 values=values, +3023 prefix=prefix, +3024 ) +3025 +3026 def _parse_struct_types(self) -> t.Optional[exp.Expression]: +3027 this = self._parse_type() or self._parse_id_var() +3028 self._match(TokenType.COLON) +3029 return self._parse_column_def(this) +3030 +3031 def _parse_at_time_zone(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: +3032 if not self._match_text_seq("AT", "TIME", "ZONE"): +3033 return this +3034 return self.expression(exp.AtTimeZone, this=this, zone=self._parse_unary()) +3035 +3036 def _parse_column(self) -> t.Optional[exp.Expression]: +3037 this = self._parse_field() +3038 if isinstance(this, exp.Identifier): +3039 this = self.expression(exp.Column, this=this) +3040 elif not this: +3041 return self._parse_bracket(this) +3042 return self._parse_column_ops(this) +3043 +3044 def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: +3045 this = self._parse_bracket(this) +3046 +3047 while self._match_set(self.COLUMN_OPERATORS): +3048 op_token = self._prev.token_type +3049 op = self.COLUMN_OPERATORS.get(op_token) +3050 +3051 if op_token == TokenType.DCOLON: +3052 field = self._parse_types() +3053 if not field: +3054 self.raise_error("Expected type") +3055 elif op and self._curr: +3056 self._advance() +3057 value = self._prev.text +3058 field = ( +3059 exp.Literal.number(value) +3060 if self._prev.token_type == TokenType.NUMBER +3061 else exp.Literal.string(value) +3062 ) +3063 else: +3064 field = self._parse_field(anonymous_func=True, any_token=True) +3065 +3066 
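Note: _parse_column_ops applies the COLUMN_OPERATORS table, so expr::type becomes a Cast and dotted chains are rebuilt into a single Column with table/db/catalog parts. Sketch with illustrative names:

    import sqlglot
    from sqlglot import exp

    # DCOLON column operator: Postgres-style cast.
    print(repr(sqlglot.parse_one("SELECT amount::DECIMAL(10, 2)", read="postgres").find(exp.Cast)))

    # Dot chains: db/table/column parts are shifted into the Column's args.
    col = sqlglot.parse_one("SELECT my_db.my_table.my_col").find(exp.Column)
    print(col.args.get("db"), col.args.get("table"), col.this)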
if isinstance(field, exp.Func): +3067 # bigquery allows function calls like x.y.count(...) +3068 # SAFE.SUBSTR(...) +3069 # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#function_call_rules +3070 this = self._replace_columns_with_dots(this) +3071 +3072 if op: +3073 this = op(self, this, field) +3074 elif isinstance(this, exp.Column) and not this.args.get("catalog"): +3075 this = self.expression( +3076 exp.Column, +3077 this=field, +3078 table=this.this, +3079 db=this.args.get("table"), +3080 catalog=this.args.get("db"), +3081 ) +3082 else: +3083 this = self.expression(exp.Dot, this=this, expression=field) +3084 this = self._parse_bracket(this) +3085 return this +3086 +3087 def _parse_primary(self) -> t.Optional[exp.Expression]: +3088 if self._match_set(self.PRIMARY_PARSERS): +3089 token_type = self._prev.token_type +3090 primary = self.PRIMARY_PARSERS[token_type](self, self._prev) +3091 +3092 if token_type == TokenType.STRING: +3093 expressions = [primary] +3094 while self._match(TokenType.STRING): +3095 expressions.append(exp.Literal.string(self._prev.text)) +3096 +3097 if len(expressions) > 1: +3098 return self.expression(exp.Concat, expressions=expressions) +3099 +3100 return primary +3101 +3102 if self._match_pair(TokenType.DOT, TokenType.NUMBER): +3103 return exp.Literal.number(f"0.{self._prev.text}") +3104 +3105 if self._match(TokenType.L_PAREN): +3106 comments = self._prev_comments +3107 query = self._parse_select() 3108 -3109 if self._match_pair(TokenType.DOT, TokenType.NUMBER): -3110 return exp.Literal.number(f"0.{self._prev.text}") -3111 -3112 if self._match(TokenType.L_PAREN): -3113 comments = self._prev_comments -3114 query = self._parse_select() +3109 if query: +3110 expressions = [query] +3111 else: +3112 expressions = self._parse_csv(self._parse_expression) +3113 +3114 this = self._parse_query_modifiers(seq_get(expressions, 0)) 3115 -3116 if query: -3117 expressions = [query] -3118 else: -3119 expressions = self._parse_csv(self._parse_expression) -3120 -3121 this = self._parse_query_modifiers(seq_get(expressions, 0)) -3122 -3123 if isinstance(this, exp.Subqueryable): -3124 this = self._parse_set_operations( -3125 self._parse_subquery(this=this, parse_alias=False) -3126 ) -3127 elif len(expressions) > 1: -3128 this = self.expression(exp.Tuple, expressions=expressions) -3129 else: -3130 this = self.expression(exp.Paren, this=self._parse_set_operations(this)) -3131 -3132 if this: -3133 this.add_comments(comments) -3134 self._match_r_paren(expression=this) -3135 -3136 return this -3137 -3138 return None -3139 -3140 def _parse_field( -3141 self, -3142 any_token: bool = False, -3143 tokens: t.Optional[t.Collection[TokenType]] = None, -3144 anonymous_func: bool = False, -3145 ) -> t.Optional[exp.Expression]: -3146 return ( -3147 self._parse_primary() -3148 or self._parse_function(anonymous=anonymous_func) -3149 or self._parse_id_var(any_token=any_token, tokens=tokens) -3150 ) -3151 -3152 def _parse_function( -3153 self, functions: t.Optional[t.Dict[str, t.Callable]] = None, anonymous: bool = False -3154 ) -> t.Optional[exp.Expression]: -3155 if not self._curr: -3156 return None -3157 -3158 token_type = self._curr.token_type -3159 -3160 if self._match_set(self.NO_PAREN_FUNCTION_PARSERS): -3161 return self.NO_PAREN_FUNCTION_PARSERS[token_type](self) -3162 -3163 if not self._next or self._next.token_type != TokenType.L_PAREN: -3164 if token_type in self.NO_PAREN_FUNCTIONS: -3165 self._advance() -3166 return 
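Note: _parse_primary folds adjacent string literals into a single Concat and rebuilds .5-style DOT/NUMBER pairs into a numeric literal. Sketch (outputs in the comments are indicative):

    import sqlglot

    print(sqlglot.transpile("SELECT 'foo' 'bar'")[0])  # e.g. SELECT CONCAT('foo', 'bar')
    print(sqlglot.transpile("SELECT .5")[0])           # e.g. SELECT 0.5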
self.expression(self.NO_PAREN_FUNCTIONS[token_type]) -3167 -3168 return None -3169 -3170 if token_type not in self.FUNC_TOKENS: -3171 return None +3116 if isinstance(this, exp.Subqueryable): +3117 this = self._parse_set_operations( +3118 self._parse_subquery(this=this, parse_alias=False) +3119 ) +3120 elif len(expressions) > 1: +3121 this = self.expression(exp.Tuple, expressions=expressions) +3122 else: +3123 this = self.expression(exp.Paren, this=self._parse_set_operations(this)) +3124 +3125 if this: +3126 this.add_comments(comments) +3127 +3128 self._match_r_paren(expression=this) +3129 return this +3130 +3131 return None +3132 +3133 def _parse_field( +3134 self, +3135 any_token: bool = False, +3136 tokens: t.Optional[t.Collection[TokenType]] = None, +3137 anonymous_func: bool = False, +3138 ) -> t.Optional[exp.Expression]: +3139 return ( +3140 self._parse_primary() +3141 or self._parse_function(anonymous=anonymous_func) +3142 or self._parse_id_var(any_token=any_token, tokens=tokens) +3143 ) +3144 +3145 def _parse_function( +3146 self, +3147 functions: t.Optional[t.Dict[str, t.Callable]] = None, +3148 anonymous: bool = False, +3149 optional_parens: bool = True, +3150 ) -> t.Optional[exp.Expression]: +3151 if not self._curr: +3152 return None +3153 +3154 token_type = self._curr.token_type +3155 +3156 if optional_parens and self._match_set(self.NO_PAREN_FUNCTION_PARSERS): +3157 return self.NO_PAREN_FUNCTION_PARSERS[token_type](self) +3158 +3159 if not self._next or self._next.token_type != TokenType.L_PAREN: +3160 if optional_parens and token_type in self.NO_PAREN_FUNCTIONS: +3161 self._advance() +3162 return self.expression(self.NO_PAREN_FUNCTIONS[token_type]) +3163 +3164 return None +3165 +3166 if token_type not in self.FUNC_TOKENS: +3167 return None +3168 +3169 this = self._curr.text +3170 upper = this.upper() +3171 self._advance(2) 3172 -3173 this = self._curr.text -3174 upper = this.upper() -3175 self._advance(2) -3176 -3177 parser = self.FUNCTION_PARSERS.get(upper) -3178 -3179 if parser and not anonymous: -3180 this = parser(self) -3181 else: -3182 subquery_predicate = self.SUBQUERY_PREDICATES.get(token_type) -3183 -3184 if subquery_predicate and self._curr.token_type in (TokenType.SELECT, TokenType.WITH): -3185 this = self.expression(subquery_predicate, this=self._parse_select()) -3186 self._match_r_paren() -3187 return this -3188 -3189 if functions is None: -3190 functions = self.FUNCTIONS -3191 -3192 function = functions.get(upper) -3193 -3194 alias = upper in self.FUNCTIONS_WITH_ALIASED_ARGS -3195 args = self._parse_csv(lambda: self._parse_lambda(alias=alias)) -3196 -3197 if function and not anonymous: -3198 this = function(args) -3199 self.validate_expression(this, args) -3200 else: -3201 this = self.expression(exp.Anonymous, this=this, expressions=args) -3202 -3203 self._match_r_paren(this) -3204 return self._parse_window(this) -3205 -3206 def _parse_function_parameter(self) -> t.Optional[exp.Expression]: -3207 return self._parse_column_def(self._parse_id_var()) +3173 parser = self.FUNCTION_PARSERS.get(upper) +3174 +3175 if parser and not anonymous: +3176 this = parser(self) +3177 else: +3178 subquery_predicate = self.SUBQUERY_PREDICATES.get(token_type) +3179 +3180 if subquery_predicate and self._curr.token_type in (TokenType.SELECT, TokenType.WITH): +3181 this = self.expression(subquery_predicate, this=self._parse_select()) +3182 self._match_r_paren() +3183 return this +3184 +3185 if functions is None: +3186 functions = self.FUNCTIONS +3187 +3188 function = 
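Note: _parse_function routes known names through FUNCTION_PARSERS / FUNCTIONS plus validation, lets NO_PAREN_FUNCTIONS such as CURRENT_DATE parse without parentheses (now gated on optional_parens), and keeps anything unknown as an Anonymous call so it round-trips unchanged. Sketch (SOME_UDF is a made-up name):

    import sqlglot
    from sqlglot import exp

    # Unknown function -> exp.Anonymous with the original name and args preserved.
    print(repr(sqlglot.parse_one("SELECT SOME_UDF(1, 2)").find(exp.Anonymous)))

    # No-paren function parses and regenerates without parentheses.
    print(sqlglot.transpile("SELECT CURRENT_DATE")[0])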
functions.get(upper) +3189 +3190 alias = upper in self.FUNCTIONS_WITH_ALIASED_ARGS +3191 args = self._parse_csv(lambda: self._parse_lambda(alias=alias)) +3192 +3193 if function and not anonymous: +3194 this = self.validate_expression(function(args), args) +3195 else: +3196 this = self.expression(exp.Anonymous, this=this, expressions=args) +3197 +3198 self._match_r_paren(this) +3199 return self._parse_window(this) +3200 +3201 def _parse_function_parameter(self) -> t.Optional[exp.Expression]: +3202 return self._parse_column_def(self._parse_id_var()) +3203 +3204 def _parse_user_defined_function( +3205 self, kind: t.Optional[TokenType] = None +3206 ) -> t.Optional[exp.Expression]: +3207 this = self._parse_id_var() 3208 -3209 def _parse_user_defined_function( -3210 self, kind: t.Optional[TokenType] = None -3211 ) -> t.Optional[exp.Expression]: -3212 this = self._parse_id_var() -3213 -3214 while self._match(TokenType.DOT): -3215 this = self.expression(exp.Dot, this=this, expression=self._parse_id_var()) -3216 -3217 if not self._match(TokenType.L_PAREN): -3218 return this -3219 -3220 expressions = self._parse_csv(self._parse_function_parameter) -3221 self._match_r_paren() -3222 return self.expression( -3223 exp.UserDefinedFunction, this=this, expressions=expressions, wrapped=True -3224 ) +3209 while self._match(TokenType.DOT): +3210 this = self.expression(exp.Dot, this=this, expression=self._parse_id_var()) +3211 +3212 if not self._match(TokenType.L_PAREN): +3213 return this +3214 +3215 expressions = self._parse_csv(self._parse_function_parameter) +3216 self._match_r_paren() +3217 return self.expression( +3218 exp.UserDefinedFunction, this=this, expressions=expressions, wrapped=True +3219 ) +3220 +3221 def _parse_introducer(self, token: Token) -> exp.Introducer | exp.Identifier: +3222 literal = self._parse_primary() +3223 if literal: +3224 return self.expression(exp.Introducer, this=token.text, expression=literal) 3225 -3226 def _parse_introducer(self, token: Token) -> t.Optional[exp.Expression]: -3227 literal = self._parse_primary() -3228 if literal: -3229 return self.expression(exp.Introducer, this=token.text, expression=literal) -3230 -3231 return self.expression(exp.Identifier, this=token.text) -3232 -3233 def _parse_session_parameter(self) -> exp.Expression: -3234 kind = None -3235 this = self._parse_id_var() or self._parse_primary() -3236 -3237 if this and self._match(TokenType.DOT): -3238 kind = this.name -3239 this = self._parse_var() or self._parse_primary() +3226 return self.expression(exp.Identifier, this=token.text) +3227 +3228 def _parse_session_parameter(self) -> exp.SessionParameter: +3229 kind = None +3230 this = self._parse_id_var() or self._parse_primary() +3231 +3232 if this and self._match(TokenType.DOT): +3233 kind = this.name +3234 this = self._parse_var() or self._parse_primary() +3235 +3236 return self.expression(exp.SessionParameter, this=this, kind=kind) +3237 +3238 def _parse_lambda(self, alias: bool = False) -> t.Optional[exp.Expression]: +3239 index = self._index 3240 -3241 return self.expression(exp.SessionParameter, this=this, kind=kind) -3242 -3243 def _parse_lambda(self, alias: bool = False) -> t.Optional[exp.Expression]: -3244 index = self._index -3245 -3246 if self._match(TokenType.L_PAREN): -3247 expressions = self._parse_csv(self._parse_id_var) +3241 if self._match(TokenType.L_PAREN): +3242 expressions = self._parse_csv(self._parse_id_var) +3243 +3244 if not self._match(TokenType.R_PAREN): +3245 self._retreat(index) +3246 else: +3247 expressions = 
[self._parse_id_var()] 3248 -3249 if not self._match(TokenType.R_PAREN): -3250 self._retreat(index) -3251 else: -3252 expressions = [self._parse_id_var()] +3249 if self._match_set(self.LAMBDAS): +3250 return self.LAMBDAS[self._prev.token_type](self, expressions) +3251 +3252 self._retreat(index) 3253 -3254 if self._match_set(self.LAMBDAS): -3255 return self.LAMBDAS[self._prev.token_type](self, expressions) -3256 -3257 self._retreat(index) -3258 -3259 this: t.Optional[exp.Expression] -3260 -3261 if self._match(TokenType.DISTINCT): -3262 this = self.expression( -3263 exp.Distinct, expressions=self._parse_csv(self._parse_conjunction) -3264 ) -3265 else: -3266 this = self._parse_select_or_expression(alias=alias) +3254 this: t.Optional[exp.Expression] +3255 +3256 if self._match(TokenType.DISTINCT): +3257 this = self.expression( +3258 exp.Distinct, expressions=self._parse_csv(self._parse_conjunction) +3259 ) +3260 else: +3261 this = self._parse_select_or_expression(alias=alias) +3262 +3263 if isinstance(this, exp.EQ): +3264 left = this.this +3265 if isinstance(left, exp.Column): +3266 left.replace(exp.var(left.text("this"))) 3267 -3268 if isinstance(this, exp.EQ): -3269 left = this.this -3270 if isinstance(left, exp.Column): -3271 left.replace(exp.Var(this=left.text("this"))) +3268 return self._parse_limit(self._parse_order(self._parse_respect_or_ignore_nulls(this))) +3269 +3270 def _parse_schema(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]: +3271 index = self._index 3272 -3273 return self._parse_limit(self._parse_order(self._parse_respect_or_ignore_nulls(this))) -3274 -3275 def _parse_schema(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]: -3276 index = self._index -3277 -3278 if not self.errors: -3279 try: -3280 if self._parse_select(nested=True): -3281 return this -3282 except ParseError: -3283 pass -3284 finally: -3285 self.errors.clear() -3286 self._retreat(index) -3287 -3288 if not self._match(TokenType.L_PAREN): -3289 return this +3273 if not self.errors: +3274 try: +3275 if self._parse_select(nested=True): +3276 return this +3277 except ParseError: +3278 pass +3279 finally: +3280 self.errors.clear() +3281 self._retreat(index) +3282 +3283 if not self._match(TokenType.L_PAREN): +3284 return this +3285 +3286 args = self._parse_csv( +3287 lambda: self._parse_constraint() +3288 or self._parse_column_def(self._parse_field(any_token=True)) +3289 ) 3290 -3291 args = self._parse_csv( -3292 lambda: self._parse_constraint() -3293 or self._parse_column_def(self._parse_field(any_token=True)) -3294 ) -3295 self._match_r_paren() -3296 return self.expression(exp.Schema, this=this, expressions=args) -3297 -3298 def _parse_column_def(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: -3299 # column defs are not really columns, they're identifiers -3300 if isinstance(this, exp.Column): -3301 this = this.this -3302 kind = self._parse_types(schema=True) +3291 self._match_r_paren() +3292 return self.expression(exp.Schema, this=this, expressions=args) +3293 +3294 def _parse_column_def(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: +3295 # column defs are not really columns, they're identifiers +3296 if isinstance(this, exp.Column): +3297 this = this.this +3298 +3299 kind = self._parse_types(schema=True) +3300 +3301 if self._match_text_seq("FOR", "ORDINALITY"): +3302 return self.expression(exp.ColumnDef, this=this, ordinality=True) 3303 -3304 if self._match_text_seq("FOR", "ORDINALITY"): -3305 return 
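Note: _parse_lambda backs higher-order function arguments of the form x -> <expr> (and parenthesized parameter lists) via the LAMBDAS table. Sketch, assuming the enclosing function itself need not be a known sqlglot function for the lambda argument to parse; read="trino" is an illustrative choice:

    import sqlglot
    from sqlglot import exp

    lam = sqlglot.parse_one(
        "SELECT TRANSFORM(xs, x -> x + 1) FROM t", read="trino"
    ).find(exp.Lambda)

    # expressions holds the lambda parameters, this the body expression.
    print(lam.args["expressions"], lam.this)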
self.expression(exp.ColumnDef, this=this, ordinality=True) -3306 -3307 constraints = [] -3308 while True: -3309 constraint = self._parse_column_constraint() -3310 if not constraint: -3311 break -3312 constraints.append(constraint) +3304 constraints = [] +3305 while True: +3306 constraint = self._parse_column_constraint() +3307 if not constraint: +3308 break +3309 constraints.append(constraint) +3310 +3311 if not kind and not constraints: +3312 return this 3313 -3314 if not kind and not constraints: -3315 return this -3316 -3317 return self.expression(exp.ColumnDef, this=this, kind=kind, constraints=constraints) -3318 -3319 def _parse_auto_increment(self) -> exp.Expression: -3320 start = None -3321 increment = None -3322 -3323 if self._match(TokenType.L_PAREN, advance=False): -3324 args = self._parse_wrapped_csv(self._parse_bitwise) -3325 start = seq_get(args, 0) -3326 increment = seq_get(args, 1) -3327 elif self._match_text_seq("START"): -3328 start = self._parse_bitwise() -3329 self._match_text_seq("INCREMENT") -3330 increment = self._parse_bitwise() -3331 -3332 if start and increment: -3333 return exp.GeneratedAsIdentityColumnConstraint(start=start, increment=increment) -3334 -3335 return exp.AutoIncrementColumnConstraint() -3336 -3337 def _parse_compress(self) -> exp.Expression: -3338 if self._match(TokenType.L_PAREN, advance=False): -3339 return self.expression( -3340 exp.CompressColumnConstraint, this=self._parse_wrapped_csv(self._parse_bitwise) -3341 ) -3342 -3343 return self.expression(exp.CompressColumnConstraint, this=self._parse_bitwise()) -3344 -3345 def _parse_generated_as_identity(self) -> exp.Expression: -3346 if self._match_text_seq("BY", "DEFAULT"): -3347 on_null = self._match_pair(TokenType.ON, TokenType.NULL) -3348 this = self.expression( -3349 exp.GeneratedAsIdentityColumnConstraint, this=False, on_null=on_null -3350 ) -3351 else: -3352 self._match_text_seq("ALWAYS") -3353 this = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=True) -3354 -3355 self._match(TokenType.ALIAS) -3356 identity = self._match_text_seq("IDENTITY") -3357 -3358 if self._match(TokenType.L_PAREN): -3359 if self._match_text_seq("START", "WITH"): -3360 this.set("start", self._parse_bitwise()) -3361 if self._match_text_seq("INCREMENT", "BY"): -3362 this.set("increment", self._parse_bitwise()) -3363 if self._match_text_seq("MINVALUE"): -3364 this.set("minvalue", self._parse_bitwise()) -3365 if self._match_text_seq("MAXVALUE"): -3366 this.set("maxvalue", self._parse_bitwise()) -3367 -3368 if self._match_text_seq("CYCLE"): -3369 this.set("cycle", True) -3370 elif self._match_text_seq("NO", "CYCLE"): -3371 this.set("cycle", False) -3372 -3373 if not identity: -3374 this.set("expression", self._parse_bitwise()) -3375 -3376 self._match_r_paren() -3377 -3378 return this -3379 -3380 def _parse_inline(self) -> t.Optional[exp.Expression]: -3381 self._match_text_seq("LENGTH") -3382 return self.expression(exp.InlineLengthColumnConstraint, this=self._parse_bitwise()) -3383 -3384 def _parse_not_constraint(self) -> t.Optional[exp.Expression]: -3385 if self._match_text_seq("NULL"): -3386 return self.expression(exp.NotNullColumnConstraint) -3387 if self._match_text_seq("CASESPECIFIC"): -3388 return self.expression(exp.CaseSpecificColumnConstraint, not_=True) -3389 return None -3390 -3391 def _parse_column_constraint(self) -> t.Optional[exp.Expression]: -3392 if self._match(TokenType.CONSTRAINT): -3393 this = self._parse_id_var() -3394 else: -3395 this = None -3396 -3397 if 
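Note: _parse_generated_as_identity captures START WITH / INCREMENT BY / MINVALUE / MAXVALUE / CYCLE as args on the identity constraint. Minimal sketch (illustrative DDL):

    import sqlglot
    from sqlglot import exp

    ident = sqlglot.parse_one(
        "CREATE TABLE t (id INT GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 5))"
    ).find(exp.GeneratedAsIdentityColumnConstraint)

    print(ident.args.get("start"), ident.args.get("increment"))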
self._match_texts(self.CONSTRAINT_PARSERS): -3398 return self.expression( -3399 exp.ColumnConstraint, -3400 this=this, -3401 kind=self.CONSTRAINT_PARSERS[self._prev.text.upper()](self), -3402 ) -3403 -3404 return this -3405 -3406 def _parse_constraint(self) -> t.Optional[exp.Expression]: -3407 if not self._match(TokenType.CONSTRAINT): -3408 return self._parse_unnamed_constraint(constraints=self.SCHEMA_UNNAMED_CONSTRAINTS) -3409 -3410 this = self._parse_id_var() -3411 expressions = [] -3412 -3413 while True: -3414 constraint = self._parse_unnamed_constraint() or self._parse_function() -3415 if not constraint: -3416 break -3417 expressions.append(constraint) -3418 -3419 return self.expression(exp.Constraint, this=this, expressions=expressions) -3420 -3421 def _parse_unnamed_constraint( -3422 self, constraints: t.Optional[t.Collection[str]] = None -3423 ) -> t.Optional[exp.Expression]: -3424 if not self._match_texts(constraints or self.CONSTRAINT_PARSERS): -3425 return None -3426 -3427 constraint = self._prev.text.upper() -3428 if constraint not in self.CONSTRAINT_PARSERS: -3429 self.raise_error(f"No parser found for schema constraint {constraint}.") -3430 -3431 return self.CONSTRAINT_PARSERS[constraint](self) -3432 -3433 def _parse_unique(self) -> exp.Expression: -3434 self._match_text_seq("KEY") -3435 return self.expression( -3436 exp.UniqueColumnConstraint, this=self._parse_schema(self._parse_id_var(any_token=False)) -3437 ) -3438 -3439 def _parse_key_constraint_options(self) -> t.List[str]: -3440 options = [] -3441 while True: -3442 if not self._curr: -3443 break -3444 -3445 if self._match(TokenType.ON): -3446 action = None -3447 on = self._advance_any() and self._prev.text -3448 -3449 if self._match_text_seq("NO", "ACTION"): -3450 action = "NO ACTION" -3451 elif self._match_text_seq("CASCADE"): -3452 action = "CASCADE" -3453 elif self._match_pair(TokenType.SET, TokenType.NULL): -3454 action = "SET NULL" -3455 elif self._match_pair(TokenType.SET, TokenType.DEFAULT): -3456 action = "SET DEFAULT" -3457 else: -3458 self.raise_error("Invalid key constraint") -3459 -3460 options.append(f"ON {on} {action}") -3461 elif self._match_text_seq("NOT", "ENFORCED"): -3462 options.append("NOT ENFORCED") -3463 elif self._match_text_seq("DEFERRABLE"): -3464 options.append("DEFERRABLE") -3465 elif self._match_text_seq("INITIALLY", "DEFERRED"): -3466 options.append("INITIALLY DEFERRED") -3467 elif self._match_text_seq("NORELY"): -3468 options.append("NORELY") -3469 elif self._match_text_seq("MATCH", "FULL"): -3470 options.append("MATCH FULL") -3471 else: -3472 break -3473 -3474 return options -3475 -3476 def _parse_references(self, match: bool = True) -> t.Optional[exp.Expression]: -3477 if match and not self._match(TokenType.REFERENCES): -3478 return None -3479 -3480 expressions = None -3481 this = self._parse_id_var() -3482 -3483 if self._match(TokenType.L_PAREN, advance=False): -3484 expressions = self._parse_wrapped_id_vars() -3485 -3486 options = self._parse_key_constraint_options() -3487 return self.expression(exp.Reference, this=this, expressions=expressions, options=options) -3488 -3489 def _parse_foreign_key(self) -> exp.Expression: -3490 expressions = self._parse_wrapped_id_vars() -3491 reference = self._parse_references() -3492 options = {} -3493 -3494 while self._match(TokenType.ON): -3495 if not self._match_set((TokenType.DELETE, TokenType.UPDATE)): -3496 self.raise_error("Expected DELETE or UPDATE") -3497 -3498 kind = self._prev.text.lower() -3499 -3500 if self._match_text_seq("NO", "ACTION"): 
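# Illustrative sketch, not part of the upstream patch: the constraint machinery shown
# above (named constraints, REFERENCES, FOREIGN KEY options) is reachable through
# sqlglot's public API. Table names and the printed output are assumptions for
# demonstration only.
import sqlglot
from sqlglot import exp

ddl = """
CREATE TABLE orders (
    id INT PRIMARY KEY,
    user_id INT,
    CONSTRAINT fk_user FOREIGN KEY (user_id) REFERENCES users (id) ON DELETE CASCADE
)
"""
ast = sqlglot.parse_one(ddl)
fk = ast.find(exp.ForeignKey)  # built by _parse_foreign_key / _parse_references
print(fk.sql() if fk else None)
# e.g. FOREIGN KEY (user_id) REFERENCES users (id) ON DELETE CASCADE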
-3501 action = "NO ACTION" -3502 elif self._match(TokenType.SET): -3503 self._match_set((TokenType.NULL, TokenType.DEFAULT)) -3504 action = "SET " + self._prev.text.upper() -3505 else: -3506 self._advance() -3507 action = self._prev.text.upper() -3508 -3509 options[kind] = action -3510 -3511 return self.expression( -3512 exp.ForeignKey, expressions=expressions, reference=reference, **options # type: ignore -3513 ) -3514 -3515 def _parse_primary_key( -3516 self, wrapped_optional: bool = False, in_props: bool = False -3517 ) -> exp.Expression: -3518 desc = ( -3519 self._match_set((TokenType.ASC, TokenType.DESC)) -3520 and self._prev.token_type == TokenType.DESC -3521 ) -3522 -3523 if not in_props and not self._match(TokenType.L_PAREN, advance=False): -3524 return self.expression(exp.PrimaryKeyColumnConstraint, desc=desc) -3525 -3526 expressions = self._parse_wrapped_csv(self._parse_field, optional=wrapped_optional) -3527 options = self._parse_key_constraint_options() -3528 return self.expression(exp.PrimaryKey, expressions=expressions, options=options) -3529 -3530 @t.overload -3531 def _parse_bracket(self, this: exp.Expression) -> exp.Expression: -3532 ... -3533 -3534 @t.overload -3535 def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: -3536 ... -3537 -3538 def _parse_bracket(self, this): -3539 if not self._match_set((TokenType.L_BRACKET, TokenType.L_BRACE)): -3540 return this -3541 -3542 bracket_kind = self._prev.token_type -3543 expressions: t.List[t.Optional[exp.Expression]] -3544 -3545 if self._match(TokenType.COLON): -3546 expressions = [self.expression(exp.Slice, expression=self._parse_conjunction())] -3547 else: -3548 expressions = self._parse_csv(lambda: self._parse_slice(self._parse_conjunction())) -3549 -3550 # https://duckdb.org/docs/sql/data_types/struct.html#creating-structs -3551 if bracket_kind == TokenType.L_BRACE: -3552 this = self.expression(exp.Struct, expressions=expressions) -3553 elif not this or this.name.upper() == "ARRAY": -3554 this = self.expression(exp.Array, expressions=expressions) -3555 else: -3556 expressions = apply_index_offset(this, expressions, -self.index_offset) -3557 this = self.expression(exp.Bracket, this=this, expressions=expressions) -3558 -3559 if not self._match(TokenType.R_BRACKET) and bracket_kind == TokenType.L_BRACKET: -3560 self.raise_error("Expected ]") -3561 elif not self._match(TokenType.R_BRACE) and bracket_kind == TokenType.L_BRACE: -3562 self.raise_error("Expected }") -3563 -3564 self._add_comments(this) -3565 return self._parse_bracket(this) -3566 -3567 def _parse_slice(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: -3568 if self._match(TokenType.COLON): -3569 return self.expression(exp.Slice, this=this, expression=self._parse_conjunction()) -3570 return this +3314 return self.expression(exp.ColumnDef, this=this, kind=kind, constraints=constraints) +3315 +3316 def _parse_auto_increment( +3317 self, +3318 ) -> exp.GeneratedAsIdentityColumnConstraint | exp.AutoIncrementColumnConstraint: +3319 start = None +3320 increment = None +3321 +3322 if self._match(TokenType.L_PAREN, advance=False): +3323 args = self._parse_wrapped_csv(self._parse_bitwise) +3324 start = seq_get(args, 0) +3325 increment = seq_get(args, 1) +3326 elif self._match_text_seq("START"): +3327 start = self._parse_bitwise() +3328 self._match_text_seq("INCREMENT") +3329 increment = self._parse_bitwise() +3330 +3331 if start and increment: +3332 return exp.GeneratedAsIdentityColumnConstraint(start=start, 
increment=increment) +3333 +3334 return exp.AutoIncrementColumnConstraint() +3335 +3336 def _parse_compress(self) -> exp.CompressColumnConstraint: +3337 if self._match(TokenType.L_PAREN, advance=False): +3338 return self.expression( +3339 exp.CompressColumnConstraint, this=self._parse_wrapped_csv(self._parse_bitwise) +3340 ) +3341 +3342 return self.expression(exp.CompressColumnConstraint, this=self._parse_bitwise()) +3343 +3344 def _parse_generated_as_identity(self) -> exp.GeneratedAsIdentityColumnConstraint: +3345 if self._match_text_seq("BY", "DEFAULT"): +3346 on_null = self._match_pair(TokenType.ON, TokenType.NULL) +3347 this = self.expression( +3348 exp.GeneratedAsIdentityColumnConstraint, this=False, on_null=on_null +3349 ) +3350 else: +3351 self._match_text_seq("ALWAYS") +3352 this = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=True) +3353 +3354 self._match(TokenType.ALIAS) +3355 identity = self._match_text_seq("IDENTITY") +3356 +3357 if self._match(TokenType.L_PAREN): +3358 if self._match_text_seq("START", "WITH"): +3359 this.set("start", self._parse_bitwise()) +3360 if self._match_text_seq("INCREMENT", "BY"): +3361 this.set("increment", self._parse_bitwise()) +3362 if self._match_text_seq("MINVALUE"): +3363 this.set("minvalue", self._parse_bitwise()) +3364 if self._match_text_seq("MAXVALUE"): +3365 this.set("maxvalue", self._parse_bitwise()) +3366 +3367 if self._match_text_seq("CYCLE"): +3368 this.set("cycle", True) +3369 elif self._match_text_seq("NO", "CYCLE"): +3370 this.set("cycle", False) +3371 +3372 if not identity: +3373 this.set("expression", self._parse_bitwise()) +3374 +3375 self._match_r_paren() +3376 +3377 return this +3378 +3379 def _parse_inline(self) -> exp.InlineLengthColumnConstraint: +3380 self._match_text_seq("LENGTH") +3381 return self.expression(exp.InlineLengthColumnConstraint, this=self._parse_bitwise()) +3382 +3383 def _parse_not_constraint( +3384 self, +3385 ) -> t.Optional[exp.NotNullColumnConstraint | exp.CaseSpecificColumnConstraint]: +3386 if self._match_text_seq("NULL"): +3387 return self.expression(exp.NotNullColumnConstraint) +3388 if self._match_text_seq("CASESPECIFIC"): +3389 return self.expression(exp.CaseSpecificColumnConstraint, not_=True) +3390 return None +3391 +3392 def _parse_column_constraint(self) -> t.Optional[exp.Expression]: +3393 if self._match(TokenType.CONSTRAINT): +3394 this = self._parse_id_var() +3395 else: +3396 this = None +3397 +3398 if self._match_texts(self.CONSTRAINT_PARSERS): +3399 return self.expression( +3400 exp.ColumnConstraint, +3401 this=this, +3402 kind=self.CONSTRAINT_PARSERS[self._prev.text.upper()](self), +3403 ) +3404 +3405 return this +3406 +3407 def _parse_constraint(self) -> t.Optional[exp.Expression]: +3408 if not self._match(TokenType.CONSTRAINT): +3409 return self._parse_unnamed_constraint(constraints=self.SCHEMA_UNNAMED_CONSTRAINTS) +3410 +3411 this = self._parse_id_var() +3412 expressions = [] +3413 +3414 while True: +3415 constraint = self._parse_unnamed_constraint() or self._parse_function() +3416 if not constraint: +3417 break +3418 expressions.append(constraint) +3419 +3420 return self.expression(exp.Constraint, this=this, expressions=expressions) +3421 +3422 def _parse_unnamed_constraint( +3423 self, constraints: t.Optional[t.Collection[str]] = None +3424 ) -> t.Optional[exp.Expression]: +3425 if not self._match_texts(constraints or self.CONSTRAINT_PARSERS): +3426 return None +3427 +3428 constraint = self._prev.text.upper() +3429 if constraint not in self.CONSTRAINT_PARSERS: +3430 
self.raise_error(f"No parser found for schema constraint {constraint}.") +3431 +3432 return self.CONSTRAINT_PARSERS[constraint](self) +3433 +3434 def _parse_unique(self) -> exp.UniqueColumnConstraint: +3435 self._match_text_seq("KEY") +3436 return self.expression( +3437 exp.UniqueColumnConstraint, this=self._parse_schema(self._parse_id_var(any_token=False)) +3438 ) +3439 +3440 def _parse_key_constraint_options(self) -> t.List[str]: +3441 options = [] +3442 while True: +3443 if not self._curr: +3444 break +3445 +3446 if self._match(TokenType.ON): +3447 action = None +3448 on = self._advance_any() and self._prev.text +3449 +3450 if self._match_text_seq("NO", "ACTION"): +3451 action = "NO ACTION" +3452 elif self._match_text_seq("CASCADE"): +3453 action = "CASCADE" +3454 elif self._match_pair(TokenType.SET, TokenType.NULL): +3455 action = "SET NULL" +3456 elif self._match_pair(TokenType.SET, TokenType.DEFAULT): +3457 action = "SET DEFAULT" +3458 else: +3459 self.raise_error("Invalid key constraint") +3460 +3461 options.append(f"ON {on} {action}") +3462 elif self._match_text_seq("NOT", "ENFORCED"): +3463 options.append("NOT ENFORCED") +3464 elif self._match_text_seq("DEFERRABLE"): +3465 options.append("DEFERRABLE") +3466 elif self._match_text_seq("INITIALLY", "DEFERRED"): +3467 options.append("INITIALLY DEFERRED") +3468 elif self._match_text_seq("NORELY"): +3469 options.append("NORELY") +3470 elif self._match_text_seq("MATCH", "FULL"): +3471 options.append("MATCH FULL") +3472 else: +3473 break +3474 +3475 return options +3476 +3477 def _parse_references(self, match: bool = True) -> t.Optional[exp.Reference]: +3478 if match and not self._match(TokenType.REFERENCES): +3479 return None +3480 +3481 expressions = None +3482 this = self._parse_id_var() +3483 +3484 if self._match(TokenType.L_PAREN, advance=False): +3485 expressions = self._parse_wrapped_id_vars() +3486 +3487 options = self._parse_key_constraint_options() +3488 return self.expression(exp.Reference, this=this, expressions=expressions, options=options) +3489 +3490 def _parse_foreign_key(self) -> exp.ForeignKey: +3491 expressions = self._parse_wrapped_id_vars() +3492 reference = self._parse_references() +3493 options = {} +3494 +3495 while self._match(TokenType.ON): +3496 if not self._match_set((TokenType.DELETE, TokenType.UPDATE)): +3497 self.raise_error("Expected DELETE or UPDATE") +3498 +3499 kind = self._prev.text.lower() +3500 +3501 if self._match_text_seq("NO", "ACTION"): +3502 action = "NO ACTION" +3503 elif self._match(TokenType.SET): +3504 self._match_set((TokenType.NULL, TokenType.DEFAULT)) +3505 action = "SET " + self._prev.text.upper() +3506 else: +3507 self._advance() +3508 action = self._prev.text.upper() +3509 +3510 options[kind] = action +3511 +3512 return self.expression( +3513 exp.ForeignKey, expressions=expressions, reference=reference, **options # type: ignore +3514 ) +3515 +3516 def _parse_primary_key( +3517 self, wrapped_optional: bool = False, in_props: bool = False +3518 ) -> exp.PrimaryKeyColumnConstraint | exp.PrimaryKey: +3519 desc = ( +3520 self._match_set((TokenType.ASC, TokenType.DESC)) +3521 and self._prev.token_type == TokenType.DESC +3522 ) +3523 +3524 if not in_props and not self._match(TokenType.L_PAREN, advance=False): +3525 return self.expression(exp.PrimaryKeyColumnConstraint, desc=desc) +3526 +3527 expressions = self._parse_wrapped_csv(self._parse_field, optional=wrapped_optional) +3528 options = self._parse_key_constraint_options() +3529 return self.expression(exp.PrimaryKey, 
expressions=expressions, options=options) +3530 +3531 def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: +3532 if not self._match_set((TokenType.L_BRACKET, TokenType.L_BRACE)): +3533 return this +3534 +3535 bracket_kind = self._prev.token_type +3536 +3537 if self._match(TokenType.COLON): +3538 expressions: t.List[t.Optional[exp.Expression]] = [ +3539 self.expression(exp.Slice, expression=self._parse_conjunction()) +3540 ] +3541 else: +3542 expressions = self._parse_csv(lambda: self._parse_slice(self._parse_conjunction())) +3543 +3544 # https://duckdb.org/docs/sql/data_types/struct.html#creating-structs +3545 if bracket_kind == TokenType.L_BRACE: +3546 this = self.expression(exp.Struct, expressions=expressions) +3547 elif not this or this.name.upper() == "ARRAY": +3548 this = self.expression(exp.Array, expressions=expressions) +3549 else: +3550 expressions = apply_index_offset(this, expressions, -self.INDEX_OFFSET) +3551 this = self.expression(exp.Bracket, this=this, expressions=expressions) +3552 +3553 if not self._match(TokenType.R_BRACKET) and bracket_kind == TokenType.L_BRACKET: +3554 self.raise_error("Expected ]") +3555 elif not self._match(TokenType.R_BRACE) and bracket_kind == TokenType.L_BRACE: +3556 self.raise_error("Expected }") +3557 +3558 self._add_comments(this) +3559 return self._parse_bracket(this) +3560 +3561 def _parse_slice(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: +3562 if self._match(TokenType.COLON): +3563 return self.expression(exp.Slice, this=this, expression=self._parse_conjunction()) +3564 return this +3565 +3566 def _parse_case(self) -> t.Optional[exp.Expression]: +3567 ifs = [] +3568 default = None +3569 +3570 expression = self._parse_conjunction() 3571 -3572 def _parse_case(self) -> t.Optional[exp.Expression]: -3573 ifs = [] -3574 default = None -3575 -3576 expression = self._parse_conjunction() +3572 while self._match(TokenType.WHEN): +3573 this = self._parse_conjunction() +3574 self._match(TokenType.THEN) +3575 then = self._parse_conjunction() +3576 ifs.append(self.expression(exp.If, this=this, true=then)) 3577 -3578 while self._match(TokenType.WHEN): -3579 this = self._parse_conjunction() -3580 self._match(TokenType.THEN) -3581 then = self._parse_conjunction() -3582 ifs.append(self.expression(exp.If, this=this, true=then)) +3578 if self._match(TokenType.ELSE): +3579 default = self._parse_conjunction() +3580 +3581 if not self._match(TokenType.END): +3582 self.raise_error("Expected END after CASE", self._prev) 3583 -3584 if self._match(TokenType.ELSE): -3585 default = self._parse_conjunction() -3586 -3587 if not self._match(TokenType.END): -3588 self.raise_error("Expected END after CASE", self._prev) -3589 -3590 return self._parse_window( -3591 self.expression(exp.Case, this=expression, ifs=ifs, default=default) -3592 ) -3593 -3594 def _parse_if(self) -> t.Optional[exp.Expression]: -3595 if self._match(TokenType.L_PAREN): -3596 args = self._parse_csv(self._parse_conjunction) -3597 this = exp.If.from_arg_list(args) -3598 self.validate_expression(this, args) -3599 self._match_r_paren() -3600 else: -3601 index = self._index - 1 -3602 condition = self._parse_conjunction() -3603 -3604 if not condition: -3605 self._retreat(index) -3606 return None -3607 -3608 self._match(TokenType.THEN) -3609 true = self._parse_conjunction() -3610 false = self._parse_conjunction() if self._match(TokenType.ELSE) else None -3611 self._match(TokenType.END) -3612 this = self.expression(exp.If, this=condition, true=true, 
false=false) -3613 -3614 return self._parse_window(this) -3615 -3616 def _parse_extract(self) -> exp.Expression: -3617 this = self._parse_function() or self._parse_var() or self._parse_type() -3618 -3619 if self._match(TokenType.FROM): -3620 return self.expression(exp.Extract, this=this, expression=self._parse_bitwise()) -3621 -3622 if not self._match(TokenType.COMMA): -3623 self.raise_error("Expected FROM or comma after EXTRACT", self._prev) -3624 -3625 return self.expression(exp.Extract, this=this, expression=self._parse_bitwise()) -3626 -3627 def _parse_cast(self, strict: bool) -> exp.Expression: -3628 this = self._parse_conjunction() -3629 -3630 if not self._match(TokenType.ALIAS): -3631 if self._match(TokenType.COMMA): -3632 return self.expression( -3633 exp.CastToStrType, this=this, expression=self._parse_string() -3634 ) -3635 else: -3636 self.raise_error("Expected AS after CAST") -3637 -3638 to = self._parse_types() -3639 -3640 if not to: -3641 self.raise_error("Expected TYPE after CAST") -3642 elif to.this == exp.DataType.Type.CHAR: -3643 if self._match(TokenType.CHARACTER_SET): -3644 to = self.expression(exp.CharacterSet, this=self._parse_var_or_string()) -3645 -3646 return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to) -3647 -3648 def _parse_string_agg(self) -> exp.Expression: -3649 expression: t.Optional[exp.Expression] -3650 -3651 if self._match(TokenType.DISTINCT): -3652 args = self._parse_csv(self._parse_conjunction) -3653 expression = self.expression(exp.Distinct, expressions=[seq_get(args, 0)]) -3654 else: -3655 args = self._parse_csv(self._parse_conjunction) -3656 expression = seq_get(args, 0) -3657 -3658 index = self._index -3659 if not self._match(TokenType.R_PAREN): -3660 # postgres: STRING_AGG([DISTINCT] expression, separator [ORDER BY expression1 {ASC | DESC} [, ...]]) -3661 order = self._parse_order(this=expression) -3662 return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1)) -3663 -3664 # Checks if we can parse an order clause: WITHIN GROUP (ORDER BY <order_by_expression_list> [ASC | DESC]). -3665 # This is done "manually", instead of letting _parse_window parse it into an exp.WithinGroup node, so that -3666 # the STRING_AGG call is parsed like in MySQL / SQLite and can thus be transpiled more easily to them. 
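# Illustrative sketch, not part of the upstream patch: the WITHIN GROUP handling
# described in the comments above is what lets a Postgres-style STRING_AGG transpile
# cleanly to GROUP_CONCAT dialects. The dialect pair and the exact output line are
# indicative only.
import sqlglot

sql = "SELECT STRING_AGG(name, ', ' ORDER BY name) FROM users"
print(sqlglot.transpile(sql, read="postgres", write="mysql")[0])
# e.g. SELECT GROUP_CONCAT(name ORDER BY name SEPARATOR ', ') FROM users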
-3667 if not self._match_text_seq("WITHIN", "GROUP"): -3668 self._retreat(index) -3669 this = exp.GroupConcat.from_arg_list(args) -3670 self.validate_expression(this, args) -3671 return this -3672 -3673 self._match_l_paren() # The corresponding match_r_paren will be called in parse_function (caller) -3674 order = self._parse_order(this=expression) -3675 return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1)) -3676 -3677 def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]: -3678 to: t.Optional[exp.Expression] -3679 this = self._parse_bitwise() -3680 -3681 if self._match(TokenType.USING): -3682 to = self.expression(exp.CharacterSet, this=self._parse_var()) -3683 elif self._match(TokenType.COMMA): -3684 to = self._parse_bitwise() -3685 else: -3686 to = None -3687 -3688 # Swap the argument order if needed to produce the correct AST -3689 if self.CONVERT_TYPE_FIRST: -3690 this, to = to, this +3584 return self._parse_window( +3585 self.expression(exp.Case, this=expression, ifs=ifs, default=default) +3586 ) +3587 +3588 def _parse_if(self) -> t.Optional[exp.Expression]: +3589 if self._match(TokenType.L_PAREN): +3590 args = self._parse_csv(self._parse_conjunction) +3591 this = self.validate_expression(exp.If.from_arg_list(args), args) +3592 self._match_r_paren() +3593 else: +3594 index = self._index - 1 +3595 condition = self._parse_conjunction() +3596 +3597 if not condition: +3598 self._retreat(index) +3599 return None +3600 +3601 self._match(TokenType.THEN) +3602 true = self._parse_conjunction() +3603 false = self._parse_conjunction() if self._match(TokenType.ELSE) else None +3604 self._match(TokenType.END) +3605 this = self.expression(exp.If, this=condition, true=true, false=false) +3606 +3607 return self._parse_window(this) +3608 +3609 def _parse_extract(self) -> exp.Extract: +3610 this = self._parse_function() or self._parse_var() or self._parse_type() +3611 +3612 if self._match(TokenType.FROM): +3613 return self.expression(exp.Extract, this=this, expression=self._parse_bitwise()) +3614 +3615 if not self._match(TokenType.COMMA): +3616 self.raise_error("Expected FROM or comma after EXTRACT", self._prev) +3617 +3618 return self.expression(exp.Extract, this=this, expression=self._parse_bitwise()) +3619 +3620 def _parse_cast(self, strict: bool) -> exp.Expression: +3621 this = self._parse_conjunction() +3622 +3623 if not self._match(TokenType.ALIAS): +3624 if self._match(TokenType.COMMA): +3625 return self.expression( +3626 exp.CastToStrType, this=this, expression=self._parse_string() +3627 ) +3628 else: +3629 self.raise_error("Expected AS after CAST") +3630 +3631 to = self._parse_types() +3632 +3633 if not to: +3634 self.raise_error("Expected TYPE after CAST") +3635 elif to.this == exp.DataType.Type.CHAR: +3636 if self._match(TokenType.CHARACTER_SET): +3637 to = self.expression(exp.CharacterSet, this=self._parse_var_or_string()) +3638 elif to.this in exp.DataType.TEMPORAL_TYPES and self._match(TokenType.FORMAT): +3639 fmt = self._parse_string() +3640 +3641 return self.expression( +3642 exp.StrToDate if to.this == exp.DataType.Type.DATE else exp.StrToTime, +3643 this=this, +3644 format=exp.Literal.string( +3645 format_time( +3646 fmt.this if fmt else "", +3647 self.FORMAT_MAPPING or self.TIME_MAPPING, +3648 self.FORMAT_TRIE or self.TIME_TRIE, +3649 ) +3650 ), +3651 ) +3652 +3653 return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to) +3654 +3655 def _parse_concat(self) -> t.Optional[exp.Expression]: +3656 args = 
self._parse_csv(self._parse_conjunction) +3657 if self.CONCAT_NULL_OUTPUTS_STRING: +3658 args = [exp.func("COALESCE", arg, exp.Literal.string("")) for arg in args] +3659 +3660 # Some dialects (e.g. Trino) don't allow a single-argument CONCAT call, so when +3661 # we find such a call we replace it with its argument. +3662 if len(args) == 1: +3663 return args[0] +3664 +3665 return self.expression( +3666 exp.Concat if self.STRICT_STRING_CONCAT else exp.SafeConcat, expressions=args +3667 ) +3668 +3669 def _parse_string_agg(self) -> exp.Expression: +3670 expression: t.Optional[exp.Expression] +3671 +3672 if self._match(TokenType.DISTINCT): +3673 args = self._parse_csv(self._parse_conjunction) +3674 expression = self.expression(exp.Distinct, expressions=[seq_get(args, 0)]) +3675 else: +3676 args = self._parse_csv(self._parse_conjunction) +3677 expression = seq_get(args, 0) +3678 +3679 index = self._index +3680 if not self._match(TokenType.R_PAREN): +3681 # postgres: STRING_AGG([DISTINCT] expression, separator [ORDER BY expression1 {ASC | DESC} [, ...]]) +3682 order = self._parse_order(this=expression) +3683 return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1)) +3684 +3685 # Checks if we can parse an order clause: WITHIN GROUP (ORDER BY <order_by_expression_list> [ASC | DESC]). +3686 # This is done "manually", instead of letting _parse_window parse it into an exp.WithinGroup node, so that +3687 # the STRING_AGG call is parsed like in MySQL / SQLite and can thus be transpiled more easily to them. +3688 if not self._match_text_seq("WITHIN", "GROUP"): +3689 self._retreat(index) +3690 return self.validate_expression(exp.GroupConcat.from_arg_list(args), args) 3691 -3692 return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to) -3693 -3694 def _parse_decode(self) -> t.Optional[exp.Expression]: -3695 """ -3696 There are generally two variants of the DECODE function: -3697 -3698 - DECODE(bin, charset) -3699 - DECODE(expression, search, result [, search, result] ... [, default]) -3700 -3701 The second variant will always be parsed into a CASE expression. Note that NULL -3702 needs special treatment, since we need to explicitly check for it with `IS NULL`, -3703 instead of relying on pattern matching. 
-3704 """ -3705 args = self._parse_csv(self._parse_conjunction) +3692 self._match_l_paren() # The corresponding match_r_paren will be called in parse_function (caller) +3693 order = self._parse_order(this=expression) +3694 return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1)) +3695 +3696 def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]: +3697 to: t.Optional[exp.Expression] +3698 this = self._parse_bitwise() +3699 +3700 if self._match(TokenType.USING): +3701 to = self.expression(exp.CharacterSet, this=self._parse_var()) +3702 elif self._match(TokenType.COMMA): +3703 to = self._parse_bitwise() +3704 else: +3705 to = None 3706 -3707 if len(args) < 3: -3708 return self.expression(exp.Decode, this=seq_get(args, 0), charset=seq_get(args, 1)) -3709 -3710 expression, *expressions = args -3711 if not expression: -3712 return None -3713 -3714 ifs = [] -3715 for search, result in zip(expressions[::2], expressions[1::2]): -3716 if not search or not result: -3717 return None -3718 -3719 if isinstance(search, exp.Literal): -3720 ifs.append( -3721 exp.If(this=exp.EQ(this=expression.copy(), expression=search), true=result) -3722 ) -3723 elif isinstance(search, exp.Null): -3724 ifs.append( -3725 exp.If(this=exp.Is(this=expression.copy(), expression=exp.Null()), true=result) -3726 ) -3727 else: -3728 cond = exp.or_( -3729 exp.EQ(this=expression.copy(), expression=search), -3730 exp.and_( -3731 exp.Is(this=expression.copy(), expression=exp.Null()), -3732 exp.Is(this=search.copy(), expression=exp.Null()), -3733 copy=False, -3734 ), -3735 copy=False, -3736 ) -3737 ifs.append(exp.If(this=cond, true=result)) -3738 -3739 return exp.Case(ifs=ifs, default=expressions[-1] if len(expressions) % 2 == 1 else None) -3740 -3741 def _parse_json_key_value(self) -> t.Optional[exp.Expression]: -3742 self._match_text_seq("KEY") -3743 key = self._parse_field() -3744 self._match(TokenType.COLON) -3745 self._match_text_seq("VALUE") -3746 value = self._parse_field() -3747 if not key and not value: -3748 return None -3749 return self.expression(exp.JSONKeyValue, this=key, expression=value) -3750 -3751 def _parse_json_object(self) -> exp.Expression: -3752 expressions = self._parse_csv(self._parse_json_key_value) -3753 -3754 null_handling = None -3755 if self._match_text_seq("NULL", "ON", "NULL"): -3756 null_handling = "NULL ON NULL" -3757 elif self._match_text_seq("ABSENT", "ON", "NULL"): -3758 null_handling = "ABSENT ON NULL" +3707 # Swap the argument order if needed to produce the correct AST +3708 if self.CONVERT_TYPE_FIRST: +3709 this, to = to, this +3710 +3711 return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to) +3712 +3713 def _parse_decode(self) -> t.Optional[exp.Decode | exp.Case]: +3714 """ +3715 There are generally two variants of the DECODE function: +3716 +3717 - DECODE(bin, charset) +3718 - DECODE(expression, search, result [, search, result] ... [, default]) +3719 +3720 The second variant will always be parsed into a CASE expression. Note that NULL +3721 needs special treatment, since we need to explicitly check for it with `IS NULL`, +3722 instead of relying on pattern matching. 
+3723 """ +3724 args = self._parse_csv(self._parse_conjunction) +3725 +3726 if len(args) < 3: +3727 return self.expression(exp.Decode, this=seq_get(args, 0), charset=seq_get(args, 1)) +3728 +3729 expression, *expressions = args +3730 if not expression: +3731 return None +3732 +3733 ifs = [] +3734 for search, result in zip(expressions[::2], expressions[1::2]): +3735 if not search or not result: +3736 return None +3737 +3738 if isinstance(search, exp.Literal): +3739 ifs.append( +3740 exp.If(this=exp.EQ(this=expression.copy(), expression=search), true=result) +3741 ) +3742 elif isinstance(search, exp.Null): +3743 ifs.append( +3744 exp.If(this=exp.Is(this=expression.copy(), expression=exp.Null()), true=result) +3745 ) +3746 else: +3747 cond = exp.or_( +3748 exp.EQ(this=expression.copy(), expression=search), +3749 exp.and_( +3750 exp.Is(this=expression.copy(), expression=exp.Null()), +3751 exp.Is(this=search.copy(), expression=exp.Null()), +3752 copy=False, +3753 ), +3754 copy=False, +3755 ) +3756 ifs.append(exp.If(this=cond, true=result)) +3757 +3758 return exp.Case(ifs=ifs, default=expressions[-1] if len(expressions) % 2 == 1 else None) 3759 -3760 unique_keys = None -3761 if self._match_text_seq("WITH", "UNIQUE"): -3762 unique_keys = True -3763 elif self._match_text_seq("WITHOUT", "UNIQUE"): -3764 unique_keys = False -3765 -3766 self._match_text_seq("KEYS") -3767 -3768 return_type = self._match_text_seq("RETURNING") and self._parse_type() -3769 format_json = self._match_text_seq("FORMAT", "JSON") -3770 encoding = self._match_text_seq("ENCODING") and self._parse_var() -3771 -3772 return self.expression( -3773 exp.JSONObject, -3774 expressions=expressions, -3775 null_handling=null_handling, -3776 unique_keys=unique_keys, -3777 return_type=return_type, -3778 format_json=format_json, -3779 encoding=encoding, -3780 ) -3781 -3782 def _parse_logarithm(self) -> exp.Expression: -3783 # Default argument order is base, expression -3784 args = self._parse_csv(self._parse_range) -3785 -3786 if len(args) > 1: -3787 if not self.LOG_BASE_FIRST: -3788 args.reverse() -3789 return exp.Log.from_arg_list(args) -3790 -3791 return self.expression( -3792 exp.Ln if self.LOG_DEFAULTS_TO_LN else exp.Log, this=seq_get(args, 0) -3793 ) -3794 -3795 def _parse_match_against(self) -> exp.Expression: -3796 expressions = self._parse_csv(self._parse_column) -3797 -3798 self._match_text_seq(")", "AGAINST", "(") -3799 -3800 this = self._parse_string() -3801 -3802 if self._match_text_seq("IN", "NATURAL", "LANGUAGE", "MODE"): -3803 modifier = "IN NATURAL LANGUAGE MODE" -3804 if self._match_text_seq("WITH", "QUERY", "EXPANSION"): -3805 modifier = f"{modifier} WITH QUERY EXPANSION" -3806 elif self._match_text_seq("IN", "BOOLEAN", "MODE"): -3807 modifier = "IN BOOLEAN MODE" -3808 elif self._match_text_seq("WITH", "QUERY", "EXPANSION"): -3809 modifier = "WITH QUERY EXPANSION" -3810 else: -3811 modifier = None -3812 -3813 return self.expression( -3814 exp.MatchAgainst, this=this, expressions=expressions, modifier=modifier -3815 ) -3816 -3817 # https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16 -3818 def _parse_open_json(self) -> exp.Expression: -3819 this = self._parse_bitwise() -3820 path = self._match(TokenType.COMMA) and self._parse_string() -3821 -3822 def _parse_open_json_column_def() -> exp.Expression: -3823 this = self._parse_field(any_token=True) -3824 kind = self._parse_types() -3825 path = self._parse_string() -3826 as_json = self._match_pair(TokenType.ALIAS, TokenType.JSON) 
-3827 return self.expression( -3828 exp.OpenJSONColumnDef, this=this, kind=kind, path=path, as_json=as_json -3829 ) -3830 -3831 expressions = None -3832 if self._match_pair(TokenType.R_PAREN, TokenType.WITH): -3833 self._match_l_paren() -3834 expressions = self._parse_csv(_parse_open_json_column_def) -3835 -3836 return self.expression(exp.OpenJSON, this=this, path=path, expressions=expressions) +3760 def _parse_json_key_value(self) -> t.Optional[exp.JSONKeyValue]: +3761 self._match_text_seq("KEY") +3762 key = self._parse_field() +3763 self._match(TokenType.COLON) +3764 self._match_text_seq("VALUE") +3765 value = self._parse_field() +3766 +3767 if not key and not value: +3768 return None +3769 return self.expression(exp.JSONKeyValue, this=key, expression=value) +3770 +3771 def _parse_json_object(self) -> exp.JSONObject: +3772 star = self._parse_star() +3773 expressions = [star] if star else self._parse_csv(self._parse_json_key_value) +3774 +3775 null_handling = None +3776 if self._match_text_seq("NULL", "ON", "NULL"): +3777 null_handling = "NULL ON NULL" +3778 elif self._match_text_seq("ABSENT", "ON", "NULL"): +3779 null_handling = "ABSENT ON NULL" +3780 +3781 unique_keys = None +3782 if self._match_text_seq("WITH", "UNIQUE"): +3783 unique_keys = True +3784 elif self._match_text_seq("WITHOUT", "UNIQUE"): +3785 unique_keys = False +3786 +3787 self._match_text_seq("KEYS") +3788 +3789 return_type = self._match_text_seq("RETURNING") and self._parse_type() +3790 format_json = self._match_text_seq("FORMAT", "JSON") +3791 encoding = self._match_text_seq("ENCODING") and self._parse_var() +3792 +3793 return self.expression( +3794 exp.JSONObject, +3795 expressions=expressions, +3796 null_handling=null_handling, +3797 unique_keys=unique_keys, +3798 return_type=return_type, +3799 format_json=format_json, +3800 encoding=encoding, +3801 ) +3802 +3803 def _parse_logarithm(self) -> exp.Func: +3804 # Default argument order is base, expression +3805 args = self._parse_csv(self._parse_range) +3806 +3807 if len(args) > 1: +3808 if not self.LOG_BASE_FIRST: +3809 args.reverse() +3810 return exp.Log.from_arg_list(args) +3811 +3812 return self.expression( +3813 exp.Ln if self.LOG_DEFAULTS_TO_LN else exp.Log, this=seq_get(args, 0) +3814 ) +3815 +3816 def _parse_match_against(self) -> exp.MatchAgainst: +3817 expressions = self._parse_csv(self._parse_column) +3818 +3819 self._match_text_seq(")", "AGAINST", "(") +3820 +3821 this = self._parse_string() +3822 +3823 if self._match_text_seq("IN", "NATURAL", "LANGUAGE", "MODE"): +3824 modifier = "IN NATURAL LANGUAGE MODE" +3825 if self._match_text_seq("WITH", "QUERY", "EXPANSION"): +3826 modifier = f"{modifier} WITH QUERY EXPANSION" +3827 elif self._match_text_seq("IN", "BOOLEAN", "MODE"): +3828 modifier = "IN BOOLEAN MODE" +3829 elif self._match_text_seq("WITH", "QUERY", "EXPANSION"): +3830 modifier = "WITH QUERY EXPANSION" +3831 else: +3832 modifier = None +3833 +3834 return self.expression( +3835 exp.MatchAgainst, this=this, expressions=expressions, modifier=modifier +3836 ) 3837 -3838 def _parse_position(self, haystack_first: bool = False) -> exp.Expression: -3839 args = self._parse_csv(self._parse_bitwise) -3840 -3841 if self._match(TokenType.IN): -3842 return self.expression( -3843 exp.StrPosition, this=self._parse_bitwise(), substr=seq_get(args, 0) -3844 ) -3845 -3846 if haystack_first: -3847 haystack = seq_get(args, 0) -3848 needle = seq_get(args, 1) -3849 else: -3850 needle = seq_get(args, 0) -3851 haystack = seq_get(args, 1) +3838 # 
https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16 +3839 def _parse_open_json(self) -> exp.OpenJSON: +3840 this = self._parse_bitwise() +3841 path = self._match(TokenType.COMMA) and self._parse_string() +3842 +3843 def _parse_open_json_column_def() -> exp.OpenJSONColumnDef: +3844 this = self._parse_field(any_token=True) +3845 kind = self._parse_types() +3846 path = self._parse_string() +3847 as_json = self._match_pair(TokenType.ALIAS, TokenType.JSON) +3848 +3849 return self.expression( +3850 exp.OpenJSONColumnDef, this=this, kind=kind, path=path, as_json=as_json +3851 ) 3852 -3853 this = exp.StrPosition(this=haystack, substr=needle, position=seq_get(args, 2)) -3854 -3855 self.validate_expression(this, args) -3856 -3857 return this -3858 -3859 def _parse_join_hint(self, func_name: str) -> exp.Expression: -3860 args = self._parse_csv(self._parse_table) -3861 return exp.JoinHint(this=func_name.upper(), expressions=args) +3853 expressions = None +3854 if self._match_pair(TokenType.R_PAREN, TokenType.WITH): +3855 self._match_l_paren() +3856 expressions = self._parse_csv(_parse_open_json_column_def) +3857 +3858 return self.expression(exp.OpenJSON, this=this, path=path, expressions=expressions) +3859 +3860 def _parse_position(self, haystack_first: bool = False) -> exp.StrPosition: +3861 args = self._parse_csv(self._parse_bitwise) 3862 -3863 def _parse_substring(self) -> exp.Expression: -3864 # Postgres supports the form: substring(string [from int] [for int]) -3865 # https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6 -3866 -3867 args = self._parse_csv(self._parse_bitwise) -3868 -3869 if self._match(TokenType.FROM): -3870 args.append(self._parse_bitwise()) -3871 if self._match(TokenType.FOR): -3872 args.append(self._parse_bitwise()) -3873 -3874 this = exp.Substring.from_arg_list(args) -3875 self.validate_expression(this, args) -3876 -3877 return this +3863 if self._match(TokenType.IN): +3864 return self.expression( +3865 exp.StrPosition, this=self._parse_bitwise(), substr=seq_get(args, 0) +3866 ) +3867 +3868 if haystack_first: +3869 haystack = seq_get(args, 0) +3870 needle = seq_get(args, 1) +3871 else: +3872 needle = seq_get(args, 0) +3873 haystack = seq_get(args, 1) +3874 +3875 return self.expression( +3876 exp.StrPosition, this=haystack, substr=needle, position=seq_get(args, 2) +3877 ) 3878 -3879 def _parse_trim(self) -> exp.Expression: -3880 # https://www.w3resource.com/sql/character-functions/trim.php -3881 # https://docs.oracle.com/javadb/10.8.3.0/ref/rreftrimfunc.html +3879 def _parse_join_hint(self, func_name: str) -> exp.JoinHint: +3880 args = self._parse_csv(self._parse_table) +3881 return exp.JoinHint(this=func_name.upper(), expressions=args) 3882 -3883 position = None -3884 collation = None -3885 -3886 if self._match_texts(self.TRIM_TYPES): -3887 position = self._prev.text.upper() +3883 def _parse_substring(self) -> exp.Substring: +3884 # Postgres supports the form: substring(string [from int] [for int]) +3885 # https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6 +3886 +3887 args = self._parse_csv(self._parse_bitwise) 3888 -3889 expression = self._parse_bitwise() -3890 if self._match_set((TokenType.FROM, TokenType.COMMA)): -3891 this = self._parse_bitwise() -3892 else: -3893 this = expression -3894 expression = None +3889 if self._match(TokenType.FROM): +3890 args.append(self._parse_bitwise()) +3891 if self._match(TokenType.FOR): +3892 args.append(self._parse_bitwise()) +3893 +3894 return 
self.validate_expression(exp.Substring.from_arg_list(args), args) 3895 -3896 if self._match(TokenType.COLLATE): -3897 collation = self._parse_bitwise() -3898 -3899 return self.expression( -3900 exp.Trim, -3901 this=this, -3902 position=position, -3903 expression=expression, -3904 collation=collation, -3905 ) -3906 -3907 def _parse_window_clause(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]: -3908 return self._match(TokenType.WINDOW) and self._parse_csv(self._parse_named_window) -3909 -3910 def _parse_named_window(self) -> t.Optional[exp.Expression]: -3911 return self._parse_window(self._parse_id_var(), alias=True) +3896 def _parse_trim(self) -> exp.Trim: +3897 # https://www.w3resource.com/sql/character-functions/trim.php +3898 # https://docs.oracle.com/javadb/10.8.3.0/ref/rreftrimfunc.html +3899 +3900 position = None +3901 collation = None +3902 +3903 if self._match_texts(self.TRIM_TYPES): +3904 position = self._prev.text.upper() +3905 +3906 expression = self._parse_bitwise() +3907 if self._match_set((TokenType.FROM, TokenType.COMMA)): +3908 this = self._parse_bitwise() +3909 else: +3910 this = expression +3911 expression = None 3912 -3913 def _parse_respect_or_ignore_nulls( -3914 self, this: t.Optional[exp.Expression] -3915 ) -> t.Optional[exp.Expression]: -3916 if self._match_text_seq("IGNORE", "NULLS"): -3917 return self.expression(exp.IgnoreNulls, this=this) -3918 if self._match_text_seq("RESPECT", "NULLS"): -3919 return self.expression(exp.RespectNulls, this=this) -3920 return this -3921 -3922 def _parse_window( -3923 self, this: t.Optional[exp.Expression], alias: bool = False -3924 ) -> t.Optional[exp.Expression]: -3925 if self._match_pair(TokenType.FILTER, TokenType.L_PAREN): -3926 this = self.expression(exp.Filter, this=this, expression=self._parse_where()) -3927 self._match_r_paren() -3928 -3929 # T-SQL allows the OVER (...) syntax after WITHIN GROUP. -3930 # https://learn.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver16 -3931 if self._match_text_seq("WITHIN", "GROUP"): -3932 order = self._parse_wrapped(self._parse_order) -3933 this = self.expression(exp.WithinGroup, this=this, expression=order) +3913 if self._match(TokenType.COLLATE): +3914 collation = self._parse_bitwise() +3915 +3916 return self.expression( +3917 exp.Trim, this=this, position=position, expression=expression, collation=collation +3918 ) +3919 +3920 def _parse_window_clause(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]: +3921 return self._match(TokenType.WINDOW) and self._parse_csv(self._parse_named_window) +3922 +3923 def _parse_named_window(self) -> t.Optional[exp.Expression]: +3924 return self._parse_window(self._parse_id_var(), alias=True) +3925 +3926 def _parse_respect_or_ignore_nulls( +3927 self, this: t.Optional[exp.Expression] +3928 ) -> t.Optional[exp.Expression]: +3929 if self._match_text_seq("IGNORE", "NULLS"): +3930 return self.expression(exp.IgnoreNulls, this=this) +3931 if self._match_text_seq("RESPECT", "NULLS"): +3932 return self.expression(exp.RespectNulls, this=this) +3933 return this 3934 -3935 # SQL spec defines an optional [ { IGNORE | RESPECT } NULLS ] OVER -3936 # Some dialects choose to implement and some do not. -3937 # https://dev.mysql.com/doc/refman/8.0/en/window-function-descriptions.html -3938 -3939 # There is some code above in _parse_lambda that handles -3940 # SELECT FIRST_VALUE(TABLE.COLUMN IGNORE|RESPECT NULLS) OVER ... 
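# Illustrative sketch, not part of the upstream patch: the two NULLS placements that the
# comments above describe parse to the same node shape. Dialect choice (the default
# "sqlglot" dialect) is an assumption here; the printed result is indicative only.
import sqlglot
from sqlglot import exp

inside = sqlglot.parse_one("SELECT FIRST_VALUE(x IGNORE NULLS) OVER (ORDER BY y) FROM t")
outside = sqlglot.parse_one("SELECT FIRST_VALUE(x) IGNORE NULLS OVER (ORDER BY y) FROM t")
print(inside.find(exp.IgnoreNulls) is not None, outside.find(exp.IgnoreNulls) is not None)
# expected to print: True True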
+3935 def _parse_window( +3936 self, this: t.Optional[exp.Expression], alias: bool = False +3937 ) -> t.Optional[exp.Expression]: +3938 if self._match_pair(TokenType.FILTER, TokenType.L_PAREN): +3939 this = self.expression(exp.Filter, this=this, expression=self._parse_where()) +3940 self._match_r_paren() 3941 -3942 # The below changes handle -3943 # SELECT FIRST_VALUE(TABLE.COLUMN) IGNORE|RESPECT NULLS OVER ... -3944 -3945 # Oracle allows both formats -3946 # (https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/img_text/first_value.html) -3947 # and Snowflake chose to do the same for familiarity -3948 # https://docs.snowflake.com/en/sql-reference/functions/first_value.html#usage-notes -3949 this = self._parse_respect_or_ignore_nulls(this) -3950 -3951 # bigquery select from window x AS (partition by ...) -3952 if alias: -3953 over = None -3954 self._match(TokenType.ALIAS) -3955 elif not self._match_set(self.WINDOW_BEFORE_PAREN_TOKENS): -3956 return this -3957 else: -3958 over = self._prev.text.upper() -3959 -3960 if not self._match(TokenType.L_PAREN): -3961 return self.expression( -3962 exp.Window, this=this, alias=self._parse_id_var(False), over=over -3963 ) -3964 -3965 window_alias = self._parse_id_var(any_token=False, tokens=self.WINDOW_ALIAS_TOKENS) -3966 -3967 first = self._match(TokenType.FIRST) -3968 if self._match_text_seq("LAST"): -3969 first = False -3970 -3971 partition = self._parse_partition_by() -3972 order = self._parse_order() -3973 kind = self._match_set((TokenType.ROWS, TokenType.RANGE)) and self._prev.text -3974 -3975 if kind: -3976 self._match(TokenType.BETWEEN) -3977 start = self._parse_window_spec() -3978 self._match(TokenType.AND) -3979 end = self._parse_window_spec() -3980 -3981 spec = self.expression( -3982 exp.WindowSpec, -3983 kind=kind, -3984 start=start["value"], -3985 start_side=start["side"], -3986 end=end["value"], -3987 end_side=end["side"], -3988 ) -3989 else: -3990 spec = None -3991 -3992 self._match_r_paren() +3942 # T-SQL allows the OVER (...) syntax after WITHIN GROUP. +3943 # https://learn.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver16 +3944 if self._match_text_seq("WITHIN", "GROUP"): +3945 order = self._parse_wrapped(self._parse_order) +3946 this = self.expression(exp.WithinGroup, this=this, expression=order) +3947 +3948 # SQL spec defines an optional [ { IGNORE | RESPECT } NULLS ] OVER +3949 # Some dialects choose to implement and some do not. +3950 # https://dev.mysql.com/doc/refman/8.0/en/window-function-descriptions.html +3951 +3952 # There is some code above in _parse_lambda that handles +3953 # SELECT FIRST_VALUE(TABLE.COLUMN IGNORE|RESPECT NULLS) OVER ... +3954 +3955 # The below changes handle +3956 # SELECT FIRST_VALUE(TABLE.COLUMN) IGNORE|RESPECT NULLS OVER ... +3957 +3958 # Oracle allows both formats +3959 # (https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/img_text/first_value.html) +3960 # and Snowflake chose to do the same for familiarity +3961 # https://docs.snowflake.com/en/sql-reference/functions/first_value.html#usage-notes +3962 this = self._parse_respect_or_ignore_nulls(this) +3963 +3964 # bigquery select from window x AS (partition by ...) 
+3965 if alias: +3966 over = None +3967 self._match(TokenType.ALIAS) +3968 elif not self._match_set(self.WINDOW_BEFORE_PAREN_TOKENS): +3969 return this +3970 else: +3971 over = self._prev.text.upper() +3972 +3973 if not self._match(TokenType.L_PAREN): +3974 return self.expression( +3975 exp.Window, this=this, alias=self._parse_id_var(False), over=over +3976 ) +3977 +3978 window_alias = self._parse_id_var(any_token=False, tokens=self.WINDOW_ALIAS_TOKENS) +3979 +3980 first = self._match(TokenType.FIRST) +3981 if self._match_text_seq("LAST"): +3982 first = False +3983 +3984 partition = self._parse_partition_by() +3985 order = self._parse_order() +3986 kind = self._match_set((TokenType.ROWS, TokenType.RANGE)) and self._prev.text +3987 +3988 if kind: +3989 self._match(TokenType.BETWEEN) +3990 start = self._parse_window_spec() +3991 self._match(TokenType.AND) +3992 end = self._parse_window_spec() 3993 -3994 return self.expression( -3995 exp.Window, -3996 this=this, -3997 partition_by=partition, -3998 order=order, -3999 spec=spec, -4000 alias=window_alias, -4001 over=over, -4002 first=first, -4003 ) +3994 spec = self.expression( +3995 exp.WindowSpec, +3996 kind=kind, +3997 start=start["value"], +3998 start_side=start["side"], +3999 end=end["value"], +4000 end_side=end["side"], +4001 ) +4002 else: +4003 spec = None 4004 -4005 def _parse_window_spec(self) -> t.Dict[str, t.Optional[str | exp.Expression]]: -4006 self._match(TokenType.BETWEEN) -4007 -4008 return { -4009 "value": ( -4010 (self._match_text_seq("UNBOUNDED") and "UNBOUNDED") -4011 or (self._match_text_seq("CURRENT", "ROW") and "CURRENT ROW") -4012 or self._parse_bitwise() -4013 ), -4014 "side": self._match_texts(self.WINDOW_SIDES) and self._prev.text, -4015 } -4016 -4017 def _parse_alias( -4018 self, this: t.Optional[exp.Expression], explicit: bool = False -4019 ) -> t.Optional[exp.Expression]: -4020 any_token = self._match(TokenType.ALIAS) -4021 -4022 if explicit and not any_token: -4023 return this -4024 -4025 if self._match(TokenType.L_PAREN): -4026 aliases = self.expression( -4027 exp.Aliases, -4028 this=this, -4029 expressions=self._parse_csv(lambda: self._parse_id_var(any_token)), -4030 ) -4031 self._match_r_paren(aliases) -4032 return aliases -4033 -4034 alias = self._parse_id_var(any_token) -4035 -4036 if alias: -4037 return self.expression(exp.Alias, this=this, alias=alias) -4038 -4039 return this -4040 -4041 def _parse_id_var( -4042 self, -4043 any_token: bool = True, -4044 tokens: t.Optional[t.Collection[TokenType]] = None, -4045 ) -> t.Optional[exp.Expression]: -4046 identifier = self._parse_identifier() -4047 -4048 if identifier: -4049 return identifier -4050 -4051 if (any_token and self._advance_any()) or self._match_set(tokens or self.ID_VAR_TOKENS): -4052 quoted = self._prev.token_type == TokenType.STRING -4053 return exp.Identifier(this=self._prev.text, quoted=quoted) -4054 -4055 return None -4056 -4057 def _parse_string(self) -> t.Optional[exp.Expression]: -4058 if self._match(TokenType.STRING): -4059 return self.PRIMARY_PARSERS[TokenType.STRING](self, self._prev) -4060 return self._parse_placeholder() -4061 -4062 def _parse_string_as_identifier(self) -> t.Optional[exp.Expression]: -4063 return exp.to_identifier(self._match(TokenType.STRING) and self._prev.text, quoted=True) -4064 -4065 def _parse_number(self) -> t.Optional[exp.Expression]: -4066 if self._match(TokenType.NUMBER): -4067 return self.PRIMARY_PARSERS[TokenType.NUMBER](self, self._prev) -4068 return self._parse_placeholder() +4005 self._match_r_paren() +4006 
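# Illustrative sketch, not part of the upstream patch: the ROWS/RANGE frame parsed above
# lands in an exp.WindowSpec whose arg names mirror the kwargs used in the diff
# (kind, start, start_side, end, end_side). Printed values are indicative only.
import sqlglot
from sqlglot import exp

sql = "SELECT SUM(x) OVER (ORDER BY y ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM t"
spec = sqlglot.parse_one(sql).find(exp.WindowSpec)
assert spec, "the frame clause should parse into a WindowSpec"
print(spec.args.get("kind"), spec.args.get("start"), spec.args.get("start_side"))
# e.g. ROWS UNBOUNDED PRECEDING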
+4007 return self.expression( +4008 exp.Window, +4009 this=this, +4010 partition_by=partition, +4011 order=order, +4012 spec=spec, +4013 alias=window_alias, +4014 over=over, +4015 first=first, +4016 ) +4017 +4018 def _parse_window_spec(self) -> t.Dict[str, t.Optional[str | exp.Expression]]: +4019 self._match(TokenType.BETWEEN) +4020 +4021 return { +4022 "value": ( +4023 (self._match_text_seq("UNBOUNDED") and "UNBOUNDED") +4024 or (self._match_text_seq("CURRENT", "ROW") and "CURRENT ROW") +4025 or self._parse_bitwise() +4026 ), +4027 "side": self._match_texts(self.WINDOW_SIDES) and self._prev.text, +4028 } +4029 +4030 def _parse_alias( +4031 self, this: t.Optional[exp.Expression], explicit: bool = False +4032 ) -> t.Optional[exp.Expression]: +4033 any_token = self._match(TokenType.ALIAS) +4034 +4035 if explicit and not any_token: +4036 return this +4037 +4038 if self._match(TokenType.L_PAREN): +4039 aliases = self.expression( +4040 exp.Aliases, +4041 this=this, +4042 expressions=self._parse_csv(lambda: self._parse_id_var(any_token)), +4043 ) +4044 self._match_r_paren(aliases) +4045 return aliases +4046 +4047 alias = self._parse_id_var(any_token) +4048 +4049 if alias: +4050 return self.expression(exp.Alias, this=this, alias=alias) +4051 +4052 return this +4053 +4054 def _parse_id_var( +4055 self, +4056 any_token: bool = True, +4057 tokens: t.Optional[t.Collection[TokenType]] = None, +4058 ) -> t.Optional[exp.Expression]: +4059 identifier = self._parse_identifier() +4060 +4061 if identifier: +4062 return identifier +4063 +4064 if (any_token and self._advance_any()) or self._match_set(tokens or self.ID_VAR_TOKENS): +4065 quoted = self._prev.token_type == TokenType.STRING +4066 return exp.Identifier(this=self._prev.text, quoted=quoted) +4067 +4068 return None 4069 -4070 def _parse_identifier(self) -> t.Optional[exp.Expression]: -4071 if self._match(TokenType.IDENTIFIER): -4072 return self.expression(exp.Identifier, this=self._prev.text, quoted=True) +4070 def _parse_string(self) -> t.Optional[exp.Expression]: +4071 if self._match(TokenType.STRING): +4072 return self.PRIMARY_PARSERS[TokenType.STRING](self, self._prev) 4073 return self._parse_placeholder() 4074 -4075 def _parse_var( -4076 self, any_token: bool = False, tokens: t.Optional[t.Collection[TokenType]] = None -4077 ) -> t.Optional[exp.Expression]: -4078 if ( -4079 (any_token and self._advance_any()) -4080 or self._match(TokenType.VAR) -4081 or (self._match_set(tokens) if tokens else False) -4082 ): -4083 return self.expression(exp.Var, this=self._prev.text) -4084 return self._parse_placeholder() -4085 -4086 def _advance_any(self) -> t.Optional[Token]: -4087 if self._curr and self._curr.token_type not in self.RESERVED_KEYWORDS: -4088 self._advance() -4089 return self._prev -4090 return None -4091 -4092 def _parse_var_or_string(self) -> t.Optional[exp.Expression]: -4093 return self._parse_var() or self._parse_string() -4094 -4095 def _parse_null(self) -> t.Optional[exp.Expression]: -4096 if self._match(TokenType.NULL): -4097 return self.PRIMARY_PARSERS[TokenType.NULL](self, self._prev) -4098 return None -4099 -4100 def _parse_boolean(self) -> t.Optional[exp.Expression]: -4101 if self._match(TokenType.TRUE): -4102 return self.PRIMARY_PARSERS[TokenType.TRUE](self, self._prev) -4103 if self._match(TokenType.FALSE): -4104 return self.PRIMARY_PARSERS[TokenType.FALSE](self, self._prev) -4105 return None -4106 -4107 def _parse_star(self) -> t.Optional[exp.Expression]: -4108 if self._match(TokenType.STAR): -4109 return 
self.PRIMARY_PARSERS[TokenType.STAR](self, self._prev) -4110 return None -4111 -4112 def _parse_parameter(self) -> exp.Expression: -4113 wrapped = self._match(TokenType.L_BRACE) -4114 this = self._parse_var() or self._parse_identifier() or self._parse_primary() -4115 self._match(TokenType.R_BRACE) -4116 return self.expression(exp.Parameter, this=this, wrapped=wrapped) -4117 -4118 def _parse_placeholder(self) -> t.Optional[exp.Expression]: -4119 if self._match_set(self.PLACEHOLDER_PARSERS): -4120 placeholder = self.PLACEHOLDER_PARSERS[self._prev.token_type](self) -4121 if placeholder: -4122 return placeholder -4123 self._advance(-1) -4124 return None -4125 -4126 def _parse_except(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]: -4127 if not self._match(TokenType.EXCEPT): -4128 return None -4129 if self._match(TokenType.L_PAREN, advance=False): -4130 return self._parse_wrapped_csv(self._parse_column) -4131 return self._parse_csv(self._parse_column) -4132 -4133 def _parse_replace(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]: -4134 if not self._match(TokenType.REPLACE): -4135 return None -4136 if self._match(TokenType.L_PAREN, advance=False): -4137 return self._parse_wrapped_csv(self._parse_expression) -4138 return self._parse_csv(self._parse_expression) -4139 -4140 def _parse_csv( -4141 self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA -4142 ) -> t.List[t.Optional[exp.Expression]]: -4143 parse_result = parse_method() -4144 items = [parse_result] if parse_result is not None else [] +4075 def _parse_string_as_identifier(self) -> t.Optional[exp.Identifier]: +4076 return exp.to_identifier(self._match(TokenType.STRING) and self._prev.text, quoted=True) +4077 +4078 def _parse_number(self) -> t.Optional[exp.Expression]: +4079 if self._match(TokenType.NUMBER): +4080 return self.PRIMARY_PARSERS[TokenType.NUMBER](self, self._prev) +4081 return self._parse_placeholder() +4082 +4083 def _parse_identifier(self) -> t.Optional[exp.Expression]: +4084 if self._match(TokenType.IDENTIFIER): +4085 return self.expression(exp.Identifier, this=self._prev.text, quoted=True) +4086 return self._parse_placeholder() +4087 +4088 def _parse_var( +4089 self, any_token: bool = False, tokens: t.Optional[t.Collection[TokenType]] = None +4090 ) -> t.Optional[exp.Expression]: +4091 if ( +4092 (any_token and self._advance_any()) +4093 or self._match(TokenType.VAR) +4094 or (self._match_set(tokens) if tokens else False) +4095 ): +4096 return self.expression(exp.Var, this=self._prev.text) +4097 return self._parse_placeholder() +4098 +4099 def _advance_any(self) -> t.Optional[Token]: +4100 if self._curr and self._curr.token_type not in self.RESERVED_KEYWORDS: +4101 self._advance() +4102 return self._prev +4103 return None +4104 +4105 def _parse_var_or_string(self) -> t.Optional[exp.Expression]: +4106 return self._parse_var() or self._parse_string() +4107 +4108 def _parse_null(self) -> t.Optional[exp.Expression]: +4109 if self._match(TokenType.NULL): +4110 return self.PRIMARY_PARSERS[TokenType.NULL](self, self._prev) +4111 return None +4112 +4113 def _parse_boolean(self) -> t.Optional[exp.Expression]: +4114 if self._match(TokenType.TRUE): +4115 return self.PRIMARY_PARSERS[TokenType.TRUE](self, self._prev) +4116 if self._match(TokenType.FALSE): +4117 return self.PRIMARY_PARSERS[TokenType.FALSE](self, self._prev) +4118 return None +4119 +4120 def _parse_star(self) -> t.Optional[exp.Expression]: +4121 if self._match(TokenType.STAR): +4122 return self.PRIMARY_PARSERS[TokenType.STAR](self, self._prev) 
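# Illustrative sketch, not part of the upstream patch: the token-level helpers above
# (_parse_identifier, _parse_var, _parse_star, ...) are what individual tokens resolve
# to; for example, a double-quoted name becomes a quoted exp.Identifier in the default
# dialect. Output shown is indicative only.
import sqlglot
from sqlglot import exp

ident = sqlglot.parse_one('SELECT "Total Sales" FROM t').find(exp.Identifier)
print(ident.name, ident.args.get("quoted"))
# e.g. Total Sales True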
+4123 return None +4124 +4125 def _parse_parameter(self) -> exp.Parameter: +4126 wrapped = self._match(TokenType.L_BRACE) +4127 this = self._parse_var() or self._parse_identifier() or self._parse_primary() +4128 self._match(TokenType.R_BRACE) +4129 return self.expression(exp.Parameter, this=this, wrapped=wrapped) +4130 +4131 def _parse_placeholder(self) -> t.Optional[exp.Expression]: +4132 if self._match_set(self.PLACEHOLDER_PARSERS): +4133 placeholder = self.PLACEHOLDER_PARSERS[self._prev.token_type](self) +4134 if placeholder: +4135 return placeholder +4136 self._advance(-1) +4137 return None +4138 +4139 def _parse_except(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]: +4140 if not self._match(TokenType.EXCEPT): +4141 return None +4142 if self._match(TokenType.L_PAREN, advance=False): +4143 return self._parse_wrapped_csv(self._parse_column) +4144 return self._parse_csv(self._parse_column) 4145 -4146 while self._match(sep): -4147 self._add_comments(parse_result) -4148 parse_result = parse_method() -4149 if parse_result is not None: -4150 items.append(parse_result) -4151 -4152 return items -4153 -4154 def _parse_tokens( -4155 self, parse_method: t.Callable, expressions: t.Dict -4156 ) -> t.Optional[exp.Expression]: -4157 this = parse_method() +4146 def _parse_replace(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]: +4147 if not self._match(TokenType.REPLACE): +4148 return None +4149 if self._match(TokenType.L_PAREN, advance=False): +4150 return self._parse_wrapped_csv(self._parse_expression) +4151 return self._parse_csv(self._parse_expression) +4152 +4153 def _parse_csv( +4154 self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA +4155 ) -> t.List[t.Optional[exp.Expression]]: +4156 parse_result = parse_method() +4157 items = [parse_result] if parse_result is not None else [] 4158 -4159 while self._match_set(expressions): -4160 this = self.expression( -4161 expressions[self._prev.token_type], -4162 this=this, -4163 comments=self._prev_comments, -4164 expression=parse_method(), -4165 ) +4159 while self._match(sep): +4160 self._add_comments(parse_result) +4161 parse_result = parse_method() +4162 if parse_result is not None: +4163 items.append(parse_result) +4164 +4165 return items 4166 -4167 return this -4168 -4169 def _parse_wrapped_id_vars(self, optional: bool = False) -> t.List[t.Optional[exp.Expression]]: -4170 return self._parse_wrapped_csv(self._parse_id_var, optional=optional) +4167 def _parse_tokens( +4168 self, parse_method: t.Callable, expressions: t.Dict +4169 ) -> t.Optional[exp.Expression]: +4170 this = parse_method() 4171 -4172 def _parse_wrapped_csv( -4173 self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA, optional: bool = False -4174 ) -> t.List[t.Optional[exp.Expression]]: -4175 return self._parse_wrapped( -4176 lambda: self._parse_csv(parse_method, sep=sep), optional=optional -4177 ) -4178 -4179 def _parse_wrapped(self, parse_method: t.Callable, optional: bool = False) -> t.Any: -4180 wrapped = self._match(TokenType.L_PAREN) -4181 if not wrapped and not optional: -4182 self.raise_error("Expecting (") -4183 parse_result = parse_method() -4184 if wrapped: -4185 self._match_r_paren() -4186 return parse_result -4187 -4188 def _parse_select_or_expression(self, alias: bool = False) -> t.Optional[exp.Expression]: -4189 return self._parse_select() or self._parse_set_operations( -4190 self._parse_expression() if alias else self._parse_conjunction() -4191 ) -4192 -4193 def _parse_ddl_select(self) -> t.Optional[exp.Expression]: -4194 return 
self._parse_query_modifiers( -4195 self._parse_set_operations(self._parse_select(nested=True, parse_subquery_alias=False)) -4196 ) -4197 -4198 def _parse_transaction(self) -> exp.Expression: -4199 this = None -4200 if self._match_texts(self.TRANSACTION_KIND): -4201 this = self._prev.text -4202 -4203 self._match_texts({"TRANSACTION", "WORK"}) -4204 -4205 modes = [] -4206 while True: -4207 mode = [] -4208 while self._match(TokenType.VAR): -4209 mode.append(self._prev.text) +4172 while self._match_set(expressions): +4173 this = self.expression( +4174 expressions[self._prev.token_type], +4175 this=this, +4176 comments=self._prev_comments, +4177 expression=parse_method(), +4178 ) +4179 +4180 return this +4181 +4182 def _parse_wrapped_id_vars(self, optional: bool = False) -> t.List[t.Optional[exp.Expression]]: +4183 return self._parse_wrapped_csv(self._parse_id_var, optional=optional) +4184 +4185 def _parse_wrapped_csv( +4186 self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA, optional: bool = False +4187 ) -> t.List[t.Optional[exp.Expression]]: +4188 return self._parse_wrapped( +4189 lambda: self._parse_csv(parse_method, sep=sep), optional=optional +4190 ) +4191 +4192 def _parse_wrapped(self, parse_method: t.Callable, optional: bool = False) -> t.Any: +4193 wrapped = self._match(TokenType.L_PAREN) +4194 if not wrapped and not optional: +4195 self.raise_error("Expecting (") +4196 parse_result = parse_method() +4197 if wrapped: +4198 self._match_r_paren() +4199 return parse_result +4200 +4201 def _parse_select_or_expression(self, alias: bool = False) -> t.Optional[exp.Expression]: +4202 return self._parse_select() or self._parse_set_operations( +4203 self._parse_expression() if alias else self._parse_conjunction() +4204 ) +4205 +4206 def _parse_ddl_select(self) -> t.Optional[exp.Expression]: +4207 return self._parse_query_modifiers( +4208 self._parse_set_operations(self._parse_select(nested=True, parse_subquery_alias=False)) +4209 ) 4210 -4211 if mode: -4212 modes.append(" ".join(mode)) -4213 if not self._match(TokenType.COMMA): -4214 break +4211 def _parse_transaction(self) -> exp.Transaction: +4212 this = None +4213 if self._match_texts(self.TRANSACTION_KIND): +4214 this = self._prev.text 4215 -4216 return self.expression(exp.Transaction, this=this, modes=modes) +4216 self._match_texts({"TRANSACTION", "WORK"}) 4217 -4218 def _parse_commit_or_rollback(self) -> exp.Expression: -4219 chain = None -4220 savepoint = None -4221 is_rollback = self._prev.token_type == TokenType.ROLLBACK -4222 -4223 self._match_texts({"TRANSACTION", "WORK"}) -4224 -4225 if self._match_text_seq("TO"): -4226 self._match_text_seq("SAVEPOINT") -4227 savepoint = self._parse_id_var() +4218 modes = [] +4219 while True: +4220 mode = [] +4221 while self._match(TokenType.VAR): +4222 mode.append(self._prev.text) +4223 +4224 if mode: +4225 modes.append(" ".join(mode)) +4226 if not self._match(TokenType.COMMA): +4227 break 4228 -4229 if self._match(TokenType.AND): -4230 chain = not self._match_text_seq("NO") -4231 self._match_text_seq("CHAIN") -4232 -4233 if is_rollback: -4234 return self.expression(exp.Rollback, savepoint=savepoint) -4235 return self.expression(exp.Commit, chain=chain) -4236 -4237 def _parse_add_column(self) -> t.Optional[exp.Expression]: -4238 if not self._match_text_seq("ADD"): -4239 return None -4240 -4241 self._match(TokenType.COLUMN) -4242 exists_column = self._parse_exists(not_=True) -4243 expression = self._parse_column_def(self._parse_field(any_token=True)) -4244 -4245 if expression: -4246 
expression.set("exists", exists_column) -4247 -4248 # https://docs.databricks.com/delta/update-schema.html#explicitly-update-schema-to-add-columns -4249 if self._match_texts(("FIRST", "AFTER")): -4250 position = self._prev.text -4251 column_position = self.expression( -4252 exp.ColumnPosition, this=self._parse_column(), position=position -4253 ) -4254 expression.set("position", column_position) -4255 -4256 return expression -4257 -4258 def _parse_drop_column(self) -> t.Optional[exp.Expression]: -4259 drop = self._match(TokenType.DROP) and self._parse_drop() -4260 if drop and not isinstance(drop, exp.Command): -4261 drop.set("kind", drop.args.get("kind", "COLUMN")) -4262 return drop -4263 -4264 # https://docs.aws.amazon.com/athena/latest/ug/alter-table-drop-partition.html -4265 def _parse_drop_partition(self, exists: t.Optional[bool] = None) -> exp.Expression: -4266 return self.expression( -4267 exp.DropPartition, expressions=self._parse_csv(self._parse_partition), exists=exists -4268 ) +4229 return self.expression(exp.Transaction, this=this, modes=modes) +4230 +4231 def _parse_commit_or_rollback(self) -> exp.Commit | exp.Rollback: +4232 chain = None +4233 savepoint = None +4234 is_rollback = self._prev.token_type == TokenType.ROLLBACK +4235 +4236 self._match_texts({"TRANSACTION", "WORK"}) +4237 +4238 if self._match_text_seq("TO"): +4239 self._match_text_seq("SAVEPOINT") +4240 savepoint = self._parse_id_var() +4241 +4242 if self._match(TokenType.AND): +4243 chain = not self._match_text_seq("NO") +4244 self._match_text_seq("CHAIN") +4245 +4246 if is_rollback: +4247 return self.expression(exp.Rollback, savepoint=savepoint) +4248 +4249 return self.expression(exp.Commit, chain=chain) +4250 +4251 def _parse_add_column(self) -> t.Optional[exp.Expression]: +4252 if not self._match_text_seq("ADD"): +4253 return None +4254 +4255 self._match(TokenType.COLUMN) +4256 exists_column = self._parse_exists(not_=True) +4257 expression = self._parse_column_def(self._parse_field(any_token=True)) +4258 +4259 if expression: +4260 expression.set("exists", exists_column) +4261 +4262 # https://docs.databricks.com/delta/update-schema.html#explicitly-update-schema-to-add-columns +4263 if self._match_texts(("FIRST", "AFTER")): +4264 position = self._prev.text +4265 column_position = self.expression( +4266 exp.ColumnPosition, this=self._parse_column(), position=position +4267 ) +4268 expression.set("position", column_position) 4269 -4270 def _parse_add_constraint(self) -> t.Optional[exp.Expression]: -4271 this = None -4272 kind = self._prev.token_type -4273 -4274 if kind == TokenType.CONSTRAINT: -4275 this = self._parse_id_var() -4276 -4277 if self._match_text_seq("CHECK"): -4278 expression = self._parse_wrapped(self._parse_conjunction) -4279 enforced = self._match_text_seq("ENFORCED") -4280 -4281 return self.expression( -4282 exp.AddConstraint, this=this, expression=expression, enforced=enforced -4283 ) -4284 -4285 if kind == TokenType.FOREIGN_KEY or self._match(TokenType.FOREIGN_KEY): -4286 expression = self._parse_foreign_key() -4287 elif kind == TokenType.PRIMARY_KEY or self._match(TokenType.PRIMARY_KEY): -4288 expression = self._parse_primary_key() -4289 else: -4290 expression = None -4291 -4292 return self.expression(exp.AddConstraint, this=this, expression=expression) -4293 -4294 def _parse_alter_table_add(self) -> t.List[t.Optional[exp.Expression]]: -4295 index = self._index - 1 -4296 -4297 if self._match_set(self.ADD_CONSTRAINT_TOKENS): -4298 return self._parse_csv(self._parse_add_constraint) -4299 -4300 
self._retreat(index) -4301 return self._parse_csv(self._parse_add_column) -4302 -4303 def _parse_alter_table_alter(self) -> exp.Expression: -4304 self._match(TokenType.COLUMN) -4305 column = self._parse_field(any_token=True) -4306 -4307 if self._match_pair(TokenType.DROP, TokenType.DEFAULT): -4308 return self.expression(exp.AlterColumn, this=column, drop=True) -4309 if self._match_pair(TokenType.SET, TokenType.DEFAULT): -4310 return self.expression(exp.AlterColumn, this=column, default=self._parse_conjunction()) -4311 -4312 self._match_text_seq("SET", "DATA") -4313 return self.expression( -4314 exp.AlterColumn, -4315 this=column, -4316 dtype=self._match_text_seq("TYPE") and self._parse_types(), -4317 collate=self._match(TokenType.COLLATE) and self._parse_term(), -4318 using=self._match(TokenType.USING) and self._parse_conjunction(), -4319 ) +4270 return expression +4271 +4272 def _parse_drop_column(self) -> t.Optional[exp.Drop | exp.Command]: +4273 drop = self._match(TokenType.DROP) and self._parse_drop() +4274 if drop and not isinstance(drop, exp.Command): +4275 drop.set("kind", drop.args.get("kind", "COLUMN")) +4276 return drop +4277 +4278 # https://docs.aws.amazon.com/athena/latest/ug/alter-table-drop-partition.html +4279 def _parse_drop_partition(self, exists: t.Optional[bool] = None) -> exp.DropPartition: +4280 return self.expression( +4281 exp.DropPartition, expressions=self._parse_csv(self._parse_partition), exists=exists +4282 ) +4283 +4284 def _parse_add_constraint(self) -> exp.AddConstraint: +4285 this = None +4286 kind = self._prev.token_type +4287 +4288 if kind == TokenType.CONSTRAINT: +4289 this = self._parse_id_var() +4290 +4291 if self._match_text_seq("CHECK"): +4292 expression = self._parse_wrapped(self._parse_conjunction) +4293 enforced = self._match_text_seq("ENFORCED") +4294 +4295 return self.expression( +4296 exp.AddConstraint, this=this, expression=expression, enforced=enforced +4297 ) +4298 +4299 if kind == TokenType.FOREIGN_KEY or self._match(TokenType.FOREIGN_KEY): +4300 expression = self._parse_foreign_key() +4301 elif kind == TokenType.PRIMARY_KEY or self._match(TokenType.PRIMARY_KEY): +4302 expression = self._parse_primary_key() +4303 else: +4304 expression = None +4305 +4306 return self.expression(exp.AddConstraint, this=this, expression=expression) +4307 +4308 def _parse_alter_table_add(self) -> t.List[t.Optional[exp.Expression]]: +4309 index = self._index - 1 +4310 +4311 if self._match_set(self.ADD_CONSTRAINT_TOKENS): +4312 return self._parse_csv(self._parse_add_constraint) +4313 +4314 self._retreat(index) +4315 return self._parse_csv(self._parse_add_column) +4316 +4317 def _parse_alter_table_alter(self) -> exp.AlterColumn: +4318 self._match(TokenType.COLUMN) +4319 column = self._parse_field(any_token=True) 4320 -4321 def _parse_alter_table_drop(self) -> t.List[t.Optional[exp.Expression]]: -4322 index = self._index - 1 -4323 -4324 partition_exists = self._parse_exists() -4325 if self._match(TokenType.PARTITION, advance=False): -4326 return self._parse_csv(lambda: self._parse_drop_partition(exists=partition_exists)) -4327 -4328 self._retreat(index) -4329 return self._parse_csv(self._parse_drop_column) -4330 -4331 def _parse_alter_table_rename(self) -> exp.Expression: -4332 self._match_text_seq("TO") -4333 return self.expression(exp.RenameTable, this=self._parse_table(schema=True)) +4321 if self._match_pair(TokenType.DROP, TokenType.DEFAULT): +4322 return self.expression(exp.AlterColumn, this=column, drop=True) +4323 if self._match_pair(TokenType.SET, 
TokenType.DEFAULT): +4324 return self.expression(exp.AlterColumn, this=column, default=self._parse_conjunction()) +4325 +4326 self._match_text_seq("SET", "DATA") +4327 return self.expression( +4328 exp.AlterColumn, +4329 this=column, +4330 dtype=self._match_text_seq("TYPE") and self._parse_types(), +4331 collate=self._match(TokenType.COLLATE) and self._parse_term(), +4332 using=self._match(TokenType.USING) and self._parse_conjunction(), +4333 ) 4334 -4335 def _parse_alter(self) -> t.Optional[exp.Expression]: -4336 start = self._prev +4335 def _parse_alter_table_drop(self) -> t.List[t.Optional[exp.Expression]]: +4336 index = self._index - 1 4337 -4338 if not self._match(TokenType.TABLE): -4339 return self._parse_as_command(start) -4340 -4341 exists = self._parse_exists() -4342 this = self._parse_table(schema=True) -4343 -4344 if self._next: -4345 self._advance() -4346 parser = self.ALTER_PARSERS.get(self._prev.text.upper()) if self._prev else None -4347 -4348 if parser: -4349 actions = ensure_list(parser(self)) -4350 -4351 if not self._curr: -4352 return self.expression( -4353 exp.AlterTable, -4354 this=this, -4355 exists=exists, -4356 actions=actions, -4357 ) -4358 return self._parse_as_command(start) -4359 -4360 def _parse_merge(self) -> exp.Expression: -4361 self._match(TokenType.INTO) -4362 target = self._parse_table() -4363 -4364 self._match(TokenType.USING) -4365 using = self._parse_table() -4366 -4367 self._match(TokenType.ON) -4368 on = self._parse_conjunction() -4369 -4370 whens = [] -4371 while self._match(TokenType.WHEN): -4372 matched = not self._match(TokenType.NOT) -4373 self._match_text_seq("MATCHED") -4374 source = ( -4375 False -4376 if self._match_text_seq("BY", "TARGET") -4377 else self._match_text_seq("BY", "SOURCE") -4378 ) -4379 condition = self._parse_conjunction() if self._match(TokenType.AND) else None +4338 partition_exists = self._parse_exists() +4339 if self._match(TokenType.PARTITION, advance=False): +4340 return self._parse_csv(lambda: self._parse_drop_partition(exists=partition_exists)) +4341 +4342 self._retreat(index) +4343 return self._parse_csv(self._parse_drop_column) +4344 +4345 def _parse_alter_table_rename(self) -> exp.RenameTable: +4346 self._match_text_seq("TO") +4347 return self.expression(exp.RenameTable, this=self._parse_table(schema=True)) +4348 +4349 def _parse_alter(self) -> exp.AlterTable | exp.Command: +4350 start = self._prev +4351 +4352 if not self._match(TokenType.TABLE): +4353 return self._parse_as_command(start) +4354 +4355 exists = self._parse_exists() +4356 this = self._parse_table(schema=True) +4357 +4358 if self._next: +4359 self._advance() +4360 parser = self.ALTER_PARSERS.get(self._prev.text.upper()) if self._prev else None +4361 +4362 if parser: +4363 actions = ensure_list(parser(self)) +4364 +4365 if not self._curr: +4366 return self.expression( +4367 exp.AlterTable, +4368 this=this, +4369 exists=exists, +4370 actions=actions, +4371 ) +4372 return self._parse_as_command(start) +4373 +4374 def _parse_merge(self) -> exp.Merge: +4375 self._match(TokenType.INTO) +4376 target = self._parse_table() +4377 +4378 self._match(TokenType.USING) +4379 using = self._parse_table() 4380 -4381 self._match(TokenType.THEN) -4382 -4383 if self._match(TokenType.INSERT): -4384 _this = self._parse_star() -4385 if _this: -4386 then: t.Optional[exp.Expression] = self.expression(exp.Insert, this=_this) -4387 else: -4388 then = self.expression( -4389 exp.Insert, -4390 this=self._parse_value(), -4391 expression=self._match(TokenType.VALUES) and 
self._parse_value(), -4392 ) -4393 elif self._match(TokenType.UPDATE): -4394 expressions = self._parse_star() -4395 if expressions: -4396 then = self.expression(exp.Update, expressions=expressions) -4397 else: -4398 then = self.expression( -4399 exp.Update, -4400 expressions=self._match(TokenType.SET) -4401 and self._parse_csv(self._parse_equality), -4402 ) -4403 elif self._match(TokenType.DELETE): -4404 then = self.expression(exp.Var, this=self._prev.text) -4405 else: -4406 then = None -4407 -4408 whens.append( -4409 self.expression( -4410 exp.When, -4411 matched=matched, -4412 source=source, -4413 condition=condition, -4414 then=then, -4415 ) -4416 ) -4417 -4418 return self.expression( -4419 exp.Merge, -4420 this=target, -4421 using=using, -4422 on=on, -4423 expressions=whens, -4424 ) -4425 -4426 def _parse_show(self) -> t.Optional[exp.Expression]: -4427 parser = self._find_parser(self.SHOW_PARSERS, self._show_trie) # type: ignore -4428 if parser: -4429 return parser(self) -4430 self._advance() -4431 return self.expression(exp.Show, this=self._prev.text.upper()) -4432 -4433 def _parse_set_item_assignment( -4434 self, kind: t.Optional[str] = None -4435 ) -> t.Optional[exp.Expression]: -4436 index = self._index -4437 -4438 if kind in {"GLOBAL", "SESSION"} and self._match_text_seq("TRANSACTION"): -4439 return self._parse_set_transaction(global_=kind == "GLOBAL") -4440 -4441 left = self._parse_primary() or self._parse_id_var() -4442 -4443 if not self._match_texts(("=", "TO")): -4444 self._retreat(index) -4445 return None +4381 self._match(TokenType.ON) +4382 on = self._parse_conjunction() +4383 +4384 whens = [] +4385 while self._match(TokenType.WHEN): +4386 matched = not self._match(TokenType.NOT) +4387 self._match_text_seq("MATCHED") +4388 source = ( +4389 False +4390 if self._match_text_seq("BY", "TARGET") +4391 else self._match_text_seq("BY", "SOURCE") +4392 ) +4393 condition = self._parse_conjunction() if self._match(TokenType.AND) else None +4394 +4395 self._match(TokenType.THEN) +4396 +4397 if self._match(TokenType.INSERT): +4398 _this = self._parse_star() +4399 if _this: +4400 then: t.Optional[exp.Expression] = self.expression(exp.Insert, this=_this) +4401 else: +4402 then = self.expression( +4403 exp.Insert, +4404 this=self._parse_value(), +4405 expression=self._match(TokenType.VALUES) and self._parse_value(), +4406 ) +4407 elif self._match(TokenType.UPDATE): +4408 expressions = self._parse_star() +4409 if expressions: +4410 then = self.expression(exp.Update, expressions=expressions) +4411 else: +4412 then = self.expression( +4413 exp.Update, +4414 expressions=self._match(TokenType.SET) +4415 and self._parse_csv(self._parse_equality), +4416 ) +4417 elif self._match(TokenType.DELETE): +4418 then = self.expression(exp.Var, this=self._prev.text) +4419 else: +4420 then = None +4421 +4422 whens.append( +4423 self.expression( +4424 exp.When, +4425 matched=matched, +4426 source=source, +4427 condition=condition, +4428 then=then, +4429 ) +4430 ) +4431 +4432 return self.expression( +4433 exp.Merge, +4434 this=target, +4435 using=using, +4436 on=on, +4437 expressions=whens, +4438 ) +4439 +4440 def _parse_show(self) -> t.Optional[exp.Expression]: +4441 parser = self._find_parser(self.SHOW_PARSERS, self.SHOW_TRIE) +4442 if parser: +4443 return parser(self) +4444 self._advance() +4445 return self.expression(exp.Show, this=self._prev.text.upper()) 4446 -4447 right = self._parse_statement() or self._parse_id_var() -4448 this = self.expression( -4449 exp.EQ, -4450 this=left, -4451 expression=right, 
-4452 ) -4453 -4454 return self.expression( -4455 exp.SetItem, -4456 this=this, -4457 kind=kind, -4458 ) -4459 -4460 def _parse_set_transaction(self, global_: bool = False) -> exp.Expression: -4461 self._match_text_seq("TRANSACTION") -4462 characteristics = self._parse_csv( -4463 lambda: self._parse_var_from_options(self.TRANSACTION_CHARACTERISTICS) -4464 ) -4465 return self.expression( -4466 exp.SetItem, -4467 expressions=characteristics, -4468 kind="TRANSACTION", -4469 **{"global": global_}, # type: ignore +4447 def _parse_set_item_assignment( +4448 self, kind: t.Optional[str] = None +4449 ) -> t.Optional[exp.Expression]: +4450 index = self._index +4451 +4452 if kind in {"GLOBAL", "SESSION"} and self._match_text_seq("TRANSACTION"): +4453 return self._parse_set_transaction(global_=kind == "GLOBAL") +4454 +4455 left = self._parse_primary() or self._parse_id_var() +4456 +4457 if not self._match_texts(("=", "TO")): +4458 self._retreat(index) +4459 return None +4460 +4461 right = self._parse_statement() or self._parse_id_var() +4462 this = self.expression(exp.EQ, this=left, expression=right) +4463 +4464 return self.expression(exp.SetItem, this=this, kind=kind) +4465 +4466 def _parse_set_transaction(self, global_: bool = False) -> exp.Expression: +4467 self._match_text_seq("TRANSACTION") +4468 characteristics = self._parse_csv( +4469 lambda: self._parse_var_from_options(self.TRANSACTION_CHARACTERISTICS) 4470 ) -4471 -4472 def _parse_set_item(self) -> t.Optional[exp.Expression]: -4473 parser = self._find_parser(self.SET_PARSERS, self._set_trie) # type: ignore -4474 return parser(self) if parser else self._parse_set_item_assignment(kind=None) -4475 -4476 def _parse_set(self) -> exp.Expression: -4477 index = self._index -4478 set_ = self.expression(exp.Set, expressions=self._parse_csv(self._parse_set_item)) -4479 -4480 if self._curr: -4481 self._retreat(index) -4482 return self._parse_as_command(self._prev) -4483 -4484 return set_ +4471 return self.expression( +4472 exp.SetItem, +4473 expressions=characteristics, +4474 kind="TRANSACTION", +4475 **{"global": global_}, # type: ignore +4476 ) +4477 +4478 def _parse_set_item(self) -> t.Optional[exp.Expression]: +4479 parser = self._find_parser(self.SET_PARSERS, self.SET_TRIE) +4480 return parser(self) if parser else self._parse_set_item_assignment(kind=None) +4481 +4482 def _parse_set(self) -> exp.Set | exp.Command: +4483 index = self._index +4484 set_ = self.expression(exp.Set, expressions=self._parse_csv(self._parse_set_item)) 4485 -4486 def _parse_var_from_options(self, options: t.Collection[str]) -> t.Optional[exp.Expression]: -4487 for option in options: -4488 if self._match_text_seq(*option.split(" ")): -4489 return exp.Var(this=option) -4490 return None +4486 if self._curr: +4487 self._retreat(index) +4488 return self._parse_as_command(self._prev) +4489 +4490 return set_ 4491 -4492 def _parse_as_command(self, start: Token) -> exp.Command: -4493 while self._curr: -4494 self._advance() -4495 text = self._find_sql(start, self._prev) -4496 size = len(start.text) -4497 return exp.Command(this=text[:size], expression=text[size:]) -4498 -4499 def _parse_dict_property(self, this: str) -> exp.DictProperty: -4500 settings = [] -4501 -4502 self._match_l_paren() -4503 kind = self._parse_id_var() +4492 def _parse_var_from_options(self, options: t.Collection[str]) -> t.Optional[exp.Var]: +4493 for option in options: +4494 if self._match_text_seq(*option.split(" ")): +4495 return exp.var(option) +4496 return None +4497 +4498 def _parse_as_command(self, 
start: Token) -> exp.Command: +4499 while self._curr: +4500 self._advance() +4501 text = self._find_sql(start, self._prev) +4502 size = len(start.text) +4503 return exp.Command(this=text[:size], expression=text[size:]) 4504 -4505 if self._match(TokenType.L_PAREN): -4506 while True: -4507 key = self._parse_id_var() -4508 value = self._parse_primary() -4509 -4510 if not key and value is None: -4511 break -4512 settings.append(self.expression(exp.DictSubProperty, this=key, value=value)) -4513 self._match(TokenType.R_PAREN) -4514 -4515 self._match_r_paren() -4516 -4517 return self.expression( -4518 exp.DictProperty, -4519 this=this, -4520 kind=kind.this if kind else None, -4521 settings=settings, -4522 ) -4523 -4524 def _parse_dict_range(self, this: str) -> exp.DictRange: -4525 self._match_l_paren() -4526 has_min = self._match_text_seq("MIN") -4527 if has_min: -4528 min = self._parse_var() or self._parse_primary() -4529 self._match_text_seq("MAX") -4530 max = self._parse_var() or self._parse_primary() -4531 else: -4532 max = self._parse_var() or self._parse_primary() -4533 min = exp.Literal.number(0) -4534 self._match_r_paren() -4535 return self.expression(exp.DictRange, this=this, min=min, max=max) -4536 -4537 def _find_parser( -4538 self, parsers: t.Dict[str, t.Callable], trie: t.Dict -4539 ) -> t.Optional[t.Callable]: -4540 if not self._curr: -4541 return None +4505 def _parse_dict_property(self, this: str) -> exp.DictProperty: +4506 settings = [] +4507 +4508 self._match_l_paren() +4509 kind = self._parse_id_var() +4510 +4511 if self._match(TokenType.L_PAREN): +4512 while True: +4513 key = self._parse_id_var() +4514 value = self._parse_primary() +4515 +4516 if not key and value is None: +4517 break +4518 settings.append(self.expression(exp.DictSubProperty, this=key, value=value)) +4519 self._match(TokenType.R_PAREN) +4520 +4521 self._match_r_paren() +4522 +4523 return self.expression( +4524 exp.DictProperty, +4525 this=this, +4526 kind=kind.this if kind else None, +4527 settings=settings, +4528 ) +4529 +4530 def _parse_dict_range(self, this: str) -> exp.DictRange: +4531 self._match_l_paren() +4532 has_min = self._match_text_seq("MIN") +4533 if has_min: +4534 min = self._parse_var() or self._parse_primary() +4535 self._match_text_seq("MAX") +4536 max = self._parse_var() or self._parse_primary() +4537 else: +4538 max = self._parse_var() or self._parse_primary() +4539 min = exp.Literal.number(0) +4540 self._match_r_paren() +4541 return self.expression(exp.DictRange, this=this, min=min, max=max) 4542 -4543 index = self._index -4544 this = [] -4545 while True: -4546 # The current token might be multiple words -4547 curr = self._curr.text.upper() -4548 key = curr.split(" ") -4549 this.append(curr) -4550 self._advance() -4551 result, trie = in_trie(trie, key) -4552 if result == 0: -4553 break -4554 if result == 2: -4555 subparser = parsers[" ".join(this)] -4556 return subparser -4557 self._retreat(index) -4558 return None -4559 -4560 def _match(self, token_type, advance=True, expression=None): -4561 if not self._curr: -4562 return None -4563 -4564 if self._curr.token_type == token_type: -4565 if advance: -4566 self._advance() -4567 self._add_comments(expression) -4568 return True +4543 def _find_parser( +4544 self, parsers: t.Dict[str, t.Callable], trie: t.Dict +4545 ) -> t.Optional[t.Callable]: +4546 if not self._curr: +4547 return None +4548 +4549 index = self._index +4550 this = [] +4551 while True: +4552 # The current token might be multiple words +4553 curr = self._curr.text.upper() +4554 key 
= curr.split(" ") +4555 this.append(curr) +4556 self._advance() +4557 result, trie = in_trie(trie, key) +4558 if result == 0: +4559 break +4560 if result == 2: +4561 subparser = parsers[" ".join(this)] +4562 return subparser +4563 self._retreat(index) +4564 return None +4565 +4566 def _match(self, token_type, advance=True, expression=None): +4567 if not self._curr: +4568 return None 4569 -4570 return None -4571 -4572 def _match_set(self, types, advance=True): -4573 if not self._curr: -4574 return None +4570 if self._curr.token_type == token_type: +4571 if advance: +4572 self._advance() +4573 self._add_comments(expression) +4574 return True 4575 -4576 if self._curr.token_type in types: -4577 if advance: -4578 self._advance() -4579 return True -4580 -4581 return None -4582 -4583 def _match_pair(self, token_type_a, token_type_b, advance=True): -4584 if not self._curr or not self._next: -4585 return None +4576 return None +4577 +4578 def _match_set(self, types, advance=True): +4579 if not self._curr: +4580 return None +4581 +4582 if self._curr.token_type in types: +4583 if advance: +4584 self._advance() +4585 return True 4586 -4587 if self._curr.token_type == token_type_a and self._next.token_type == token_type_b: -4588 if advance: -4589 self._advance(2) -4590 return True -4591 -4592 return None -4593 -4594 def _match_l_paren(self, expression: t.Optional[exp.Expression] = None) -> None: -4595 if not self._match(TokenType.L_PAREN, expression=expression): -4596 self.raise_error("Expecting (") +4587 return None +4588 +4589 def _match_pair(self, token_type_a, token_type_b, advance=True): +4590 if not self._curr or not self._next: +4591 return None +4592 +4593 if self._curr.token_type == token_type_a and self._next.token_type == token_type_b: +4594 if advance: +4595 self._advance(2) +4596 return True 4597 -4598 def _match_r_paren(self, expression: t.Optional[exp.Expression] = None) -> None: -4599 if not self._match(TokenType.R_PAREN, expression=expression): -4600 self.raise_error("Expecting )") -4601 -4602 def _match_texts(self, texts, advance=True): -4603 if self._curr and self._curr.text.upper() in texts: -4604 if advance: -4605 self._advance() -4606 return True -4607 return False -4608 -4609 def _match_text_seq(self, *texts, advance=True): -4610 index = self._index -4611 for text in texts: -4612 if self._curr and self._curr.text.upper() == text: -4613 self._advance() -4614 else: -4615 self._retreat(index) -4616 return False -4617 -4618 if not advance: -4619 self._retreat(index) -4620 -4621 return True -4622 -4623 @t.overload -4624 def _replace_columns_with_dots(self, this: exp.Expression) -> exp.Expression: -4625 ... 
+4598 return None +4599 +4600 def _match_l_paren(self, expression: t.Optional[exp.Expression] = None) -> None: +4601 if not self._match(TokenType.L_PAREN, expression=expression): +4602 self.raise_error("Expecting (") +4603 +4604 def _match_r_paren(self, expression: t.Optional[exp.Expression] = None) -> None: +4605 if not self._match(TokenType.R_PAREN, expression=expression): +4606 self.raise_error("Expecting )") +4607 +4608 def _match_texts(self, texts, advance=True): +4609 if self._curr and self._curr.text.upper() in texts: +4610 if advance: +4611 self._advance() +4612 return True +4613 return False +4614 +4615 def _match_text_seq(self, *texts, advance=True): +4616 index = self._index +4617 for text in texts: +4618 if self._curr and self._curr.text.upper() == text: +4619 self._advance() +4620 else: +4621 self._retreat(index) +4622 return False +4623 +4624 if not advance: +4625 self._retreat(index) 4626 -4627 @t.overload -4628 def _replace_columns_with_dots( -4629 self, this: t.Optional[exp.Expression] -4630 ) -> t.Optional[exp.Expression]: +4627 return True +4628 +4629 @t.overload +4630 def _replace_columns_with_dots(self, this: exp.Expression) -> exp.Expression: 4631 ... 4632 -4633 def _replace_columns_with_dots(self, this): -4634 if isinstance(this, exp.Dot): -4635 exp.replace_children(this, self._replace_columns_with_dots) -4636 elif isinstance(this, exp.Column): -4637 exp.replace_children(this, self._replace_columns_with_dots) -4638 table = this.args.get("table") -4639 this = ( -4640 self.expression(exp.Dot, this=table, expression=this.this) -4641 if table -4642 else self.expression(exp.Var, this=this.name) -4643 ) -4644 elif isinstance(this, exp.Identifier): -4645 this = self.expression(exp.Var, this=this.name) -4646 -4647 return this -4648 -4649 def _replace_lambda( -4650 self, node: t.Optional[exp.Expression], lambda_variables: t.Set[str] -4651 ) -> t.Optional[exp.Expression]: -4652 if not node: -4653 return node +4633 @t.overload +4634 def _replace_columns_with_dots( +4635 self, this: t.Optional[exp.Expression] +4636 ) -> t.Optional[exp.Expression]: +4637 ... 
+4638 +4639 def _replace_columns_with_dots(self, this): +4640 if isinstance(this, exp.Dot): +4641 exp.replace_children(this, self._replace_columns_with_dots) +4642 elif isinstance(this, exp.Column): +4643 exp.replace_children(this, self._replace_columns_with_dots) +4644 table = this.args.get("table") +4645 this = ( +4646 self.expression(exp.Dot, this=table, expression=this.this) +4647 if table +4648 else self.expression(exp.Var, this=this.name) +4649 ) +4650 elif isinstance(this, exp.Identifier): +4651 this = self.expression(exp.Var, this=this.name) +4652 +4653 return this 4654 -4655 for column in node.find_all(exp.Column): -4656 if column.parts[0].name in lambda_variables: -4657 dot_or_id = column.to_dot() if column.table else column.this -4658 parent = column.parent -4659 -4660 while isinstance(parent, exp.Dot): -4661 if not isinstance(parent.parent, exp.Dot): -4662 parent.replace(dot_or_id) -4663 break -4664 parent = parent.parent -4665 else: -4666 if column is node: -4667 node = dot_or_id -4668 else: -4669 column.replace(dot_or_id) -4670 return node +4655 def _replace_lambda( +4656 self, node: t.Optional[exp.Expression], lambda_variables: t.Set[str] +4657 ) -> t.Optional[exp.Expression]: +4658 if not node: +4659 return node +4660 +4661 for column in node.find_all(exp.Column): +4662 if column.parts[0].name in lambda_variables: +4663 dot_or_id = column.to_dot() if column.table else column.this +4664 parent = column.parent +4665 +4666 while isinstance(parent, exp.Dot): +4667 if not isinstance(parent.parent, exp.Dot): +4668 parent.replace(dot_or_id) +4669 break +4670 parent = parent.parent +4671 else: +4672 if column is node: +4673 node = dot_or_id +4674 else: +4675 column.replace(dot_or_id) +4676 return node
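The matcher helpers shown above (_match, _match_set, _match_text_seq, _retreat, _parse_csv and friends) all follow one backtracking pattern: inspect self._curr, advance the token index on a successful match, and rewind to a saved index when a speculative match fails. Below is a minimal, self-contained sketch of that pattern; the Cursor class and its string tokens are hypothetical stand-ins for illustration, not sqlglot's actual Parser API.

# Minimal sketch of the cursor/backtracking pattern used by the matcher helpers
# above. `Cursor` is a hypothetical stand-in, not sqlglot's real Parser class.
from typing import Callable, List, Optional


class Cursor:
    def __init__(self, tokens: List[str]) -> None:
        self._tokens = tokens
        self._index = 0

    @property
    def _curr(self) -> Optional[str]:
        # The token under the cursor, or None once the input is exhausted.
        return self._tokens[self._index] if self._index < len(self._tokens) else None

    def _advance(self, times: int = 1) -> None:
        self._index += times

    def _retreat(self, index: int) -> None:
        # Rewind to a previously saved position (backtracking).
        self._index = index

    def _match(self, text: str) -> bool:
        # Advance only on a successful match; otherwise leave the cursor alone.
        if self._curr == text:
            self._advance()
            return True
        return False

    def _match_text_seq(self, *texts: str) -> bool:
        # Speculatively consume a whole sequence; rewind completely on any miss.
        index = self._index
        for text in texts:
            if not self._match(text):
                self._retreat(index)
                return False
        return True

    def _parse_csv(self, parse_method: Callable[[], Optional[str]]) -> List[str]:
        # Same shape as Parser._parse_csv: collect comma-separated items.
        result = parse_method()
        items = [result] if result is not None else []
        while self._match(","):
            result = parse_method()
            if result is not None:
                items.append(result)
        return items


cursor = Cursor(["SET", "DATA", "TYPE", "INT"])
assert cursor._match_text_seq("SET", "DATA")      # consumes both tokens
assert not cursor._match_text_seq("TYPE", "X")    # partial match rewinds fully
assert cursor._match("TYPE") and cursor._match("INT")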
      @@ -4777,19 +4783,20 @@
-20def parse_var_map(args: t.List) -> exp.StarMap | exp.VarMap:
      -21    if len(args) == 1 and args[0].is_star:
      -22        return exp.StarMap(this=args[0])
      -23
      -24    keys = []
      -25    values = []
      -26    for i in range(0, len(args), 2):
      -27        keys.append(args[i])
      -28        values.append(args[i + 1])
      -29    return exp.VarMap(
      -30        keys=exp.Array(expressions=keys),
      -31        values=exp.Array(expressions=values),
      -32    )
+21def parse_var_map(args: t.List) -> exp.StarMap | exp.VarMap:
      +22    if len(args) == 1 and args[0].is_star:
      +23        return exp.StarMap(this=args[0])
      +24
      +25    keys = []
      +26    values = []
      +27    for i in range(0, len(args), 2):
      +28        keys.append(args[i])
      +29        values.append(args[i + 1])
      +30
      +31    return exp.VarMap(
      +32        keys=exp.Array(expressions=keys),
      +33        values=exp.Array(expressions=values),
      +34    )
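parse_var_map (shown in the hunk above) treats its flat argument list as alternating key/value pairs and packs them into two parallel exp.Array nodes, short-circuiting to exp.StarMap when the only argument is a star. A small usage sketch, assuming sqlglot 16.x where the function is importable from sqlglot.parser as documented here:

# Usage sketch for parse_var_map; assumes sqlglot 16.x as documented above.
from sqlglot import exp
from sqlglot.parser import parse_var_map

# VAR_MAP('a', 1, 'b', 2) arrives as a flat list: even indices are keys,
# odd indices are the corresponding values.
args = [
    exp.Literal.string("a"),
    exp.Literal.number(1),
    exp.Literal.string("b"),
    exp.Literal.number(2),
]

var_map = parse_var_map(args)
assert isinstance(var_map, exp.VarMap)
assert [key.this for key in var_map.args["keys"].expressions] == ["a", "b"]

# A single star argument short-circuits into StarMap instead.
assert isinstance(parse_var_map([exp.Star()]), exp.StarMap)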
       
      @@ -4801,15 +4808,15 @@
-def parse_like(args: List) -> sqlglot.expressions.Expression:
+def parse_like(args: List) -> sqlglot.expressions.Escape | sqlglot.expressions.Like:
-35def parse_like(args: t.List) -> exp.Expression:
      -36    like = exp.Like(this=seq_get(args, 1), expression=seq_get(args, 0))
      -37    return exp.Escape(this=like, expression=seq_get(args, 2)) if len(args) > 2 else like
+37def parse_like(args: t.List) -> exp.Escape | exp.Like:
      +38    like = exp.Like(this=seq_get(args, 1), expression=seq_get(args, 0))
      +39    return exp.Escape(this=like, expression=seq_get(args, 2)) if len(args) > 2 else like
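Note the argument order in parse_like: the second argument becomes this (the value being matched) and the first becomes the pattern, with an optional third argument wrapping the result in exp.Escape. A short sketch, again assuming sqlglot 16.x:

# Sketch of parse_like's argument handling; assumes sqlglot 16.x as documented above.
from sqlglot import exp
from sqlglot.parser import parse_like

like = parse_like([exp.Literal.string("a%"), exp.column("x")])
assert isinstance(like, exp.Like)
assert like.this.name == "x"            # second argument becomes `this`
assert like.expression.this == "a%"     # first argument is the pattern

# A third argument is treated as the escape character.
escaped = parse_like([exp.Literal.string("a%"), exp.column("x"), exp.Literal.string("!")])
assert isinstance(escaped, exp.Escape) and isinstance(escaped.this, exp.Like)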
       
      @@ -4827,12 +4834,12 @@
-40def binary_range_parser(
      -41    expr_type: t.Type[exp.Expression],
      -42) -> t.Callable[[Parser, t.Optional[exp.Expression]], t.Optional[exp.Expression]]:
      -43    return lambda self, this: self._parse_escape(
      -44        self.expression(expr_type, this=this, expression=self._parse_bitwise())
      -45    )
+42def binary_range_parser(
      +43    expr_type: t.Type[exp.Expression],
      +44) -> t.Callable[[Parser, t.Optional[exp.Expression]], t.Optional[exp.Expression]]:
      +45    return lambda self, this: self._parse_escape(
      +46        self.expression(expr_type, this=this, expression=self._parse_bitwise())
      +47    )
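binary_range_parser is a factory: given an expression type, it returns a (parser, left_operand) callable that parses the right operand with _parse_bitwise and threads the result through _parse_escape. Callables of this shape back infix operators such as LIKE and ILIKE in the parser's range-parser table, so end to end they surface as ordinary binary nodes. A quick check of that observable behaviour, assuming ILIKE is wired through this factory in sqlglot 16.x:

# End-to-end sketch: operators wired through binary_range_parser come back as
# ordinary binary nodes with `this` (left operand) and `expression` (right operand).
from sqlglot import exp, parse_one

ilike = parse_one("col ILIKE 'a%'")
assert isinstance(ilike, exp.ILike)
assert ilike.this.name == "col"
assert ilike.expression.this == "a%"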
       
      @@ -4850,152 +4857,149 @@
-  57class Parser(metaclass=_Parser):
      -  58    """
      -  59    Parser consumes a list of tokens produced by the `sqlglot.tokens.Tokenizer` and produces
      -  60    a parsed syntax tree.
      -  61
      -  62    Args:
      -  63        error_level: the desired error level.
      -  64            Default: ErrorLevel.IMMEDIATE
      -  65        error_message_context: determines the amount of context to capture from a
      -  66            query string when displaying the error message (in number of characters).
      -  67            Default: 50.
      -  68        index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.
      -  69            Default: 0
      -  70        alias_post_tablesample: If the table alias comes after tablesample.
      -  71            Default: False
      -  72        max_errors: Maximum number of error messages to include in a raised ParseError.
      -  73            This is only relevant if error_level is ErrorLevel.RAISE.
      -  74            Default: 3
      -  75        null_ordering: Indicates the default null ordering method to use if not explicitly set.
      -  76            Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
      -  77            Default: "nulls_are_small"
      -  78    """
      -  79
      -  80    FUNCTIONS: t.Dict[str, t.Callable] = {
      -  81        **{name: f.from_arg_list for f in exp.ALL_FUNCTIONS for name in f.sql_names()},
      -  82        "DATE_TO_DATE_STR": lambda args: exp.Cast(
      -  83            this=seq_get(args, 0),
      -  84            to=exp.DataType(this=exp.DataType.Type.TEXT),
      -  85        ),
      -  86        "GLOB": lambda args: exp.Glob(this=seq_get(args, 1), expression=seq_get(args, 0)),
      -  87        "IFNULL": exp.Coalesce.from_arg_list,
      -  88        "LIKE": parse_like,
      -  89        "TIME_TO_TIME_STR": lambda args: exp.Cast(
      -  90            this=seq_get(args, 0),
      -  91            to=exp.DataType(this=exp.DataType.Type.TEXT),
      -  92        ),
      -  93        "TS_OR_DS_TO_DATE_STR": lambda args: exp.Substring(
      -  94            this=exp.Cast(
      -  95                this=seq_get(args, 0),
      -  96                to=exp.DataType(this=exp.DataType.Type.TEXT),
      -  97            ),
      -  98            start=exp.Literal.number(1),
      -  99            length=exp.Literal.number(10),
      - 100        ),
      - 101        "VAR_MAP": parse_var_map,
      - 102    }
      - 103
      - 104    NO_PAREN_FUNCTIONS = {
      - 105        TokenType.CURRENT_DATE: exp.CurrentDate,
      - 106        TokenType.CURRENT_DATETIME: exp.CurrentDate,
      - 107        TokenType.CURRENT_TIME: exp.CurrentTime,
      - 108        TokenType.CURRENT_TIMESTAMP: exp.CurrentTimestamp,
      - 109        TokenType.CURRENT_USER: exp.CurrentUser,
      - 110    }
      - 111
      - 112    JOIN_HINTS: t.Set[str] = set()
      - 113
      - 114    NESTED_TYPE_TOKENS = {
      - 115        TokenType.ARRAY,
      - 116        TokenType.MAP,
      - 117        TokenType.NULLABLE,
      - 118        TokenType.STRUCT,
      - 119    }
      - 120
      - 121    TYPE_TOKENS = {
      - 122        TokenType.BIT,
      - 123        TokenType.BOOLEAN,
      - 124        TokenType.TINYINT,
      - 125        TokenType.UTINYINT,
      - 126        TokenType.SMALLINT,
      - 127        TokenType.USMALLINT,
      - 128        TokenType.INT,
      - 129        TokenType.UINT,
      - 130        TokenType.BIGINT,
      - 131        TokenType.UBIGINT,
      - 132        TokenType.INT128,
      - 133        TokenType.UINT128,
      - 134        TokenType.INT256,
      - 135        TokenType.UINT256,
      - 136        TokenType.FLOAT,
      - 137        TokenType.DOUBLE,
      - 138        TokenType.CHAR,
      - 139        TokenType.NCHAR,
      - 140        TokenType.VARCHAR,
      - 141        TokenType.NVARCHAR,
      - 142        TokenType.TEXT,
      - 143        TokenType.MEDIUMTEXT,
      - 144        TokenType.LONGTEXT,
      - 145        TokenType.MEDIUMBLOB,
      - 146        TokenType.LONGBLOB,
      - 147        TokenType.BINARY,
      - 148        TokenType.VARBINARY,
      - 149        TokenType.JSON,
      - 150        TokenType.JSONB,
      - 151        TokenType.INTERVAL,
      - 152        TokenType.TIME,
      - 153        TokenType.TIMESTAMP,
      - 154        TokenType.TIMESTAMPTZ,
      - 155        TokenType.TIMESTAMPLTZ,
      - 156        TokenType.DATETIME,
      - 157        TokenType.DATETIME64,
      - 158        TokenType.DATE,
      - 159        TokenType.INT4RANGE,
      - 160        TokenType.INT4MULTIRANGE,
      - 161        TokenType.INT8RANGE,
      - 162        TokenType.INT8MULTIRANGE,
      - 163        TokenType.NUMRANGE,
      - 164        TokenType.NUMMULTIRANGE,
      - 165        TokenType.TSRANGE,
      - 166        TokenType.TSMULTIRANGE,
      - 167        TokenType.TSTZRANGE,
      - 168        TokenType.TSTZMULTIRANGE,
      - 169        TokenType.DATERANGE,
      - 170        TokenType.DATEMULTIRANGE,
      - 171        TokenType.DECIMAL,
      - 172        TokenType.BIGDECIMAL,
      - 173        TokenType.UUID,
      - 174        TokenType.GEOGRAPHY,
      - 175        TokenType.GEOMETRY,
      - 176        TokenType.HLLSKETCH,
      - 177        TokenType.HSTORE,
      - 178        TokenType.PSEUDO_TYPE,
      - 179        TokenType.SUPER,
      - 180        TokenType.SERIAL,
      - 181        TokenType.SMALLSERIAL,
      - 182        TokenType.BIGSERIAL,
      - 183        TokenType.XML,
      - 184        TokenType.UNIQUEIDENTIFIER,
      - 185        TokenType.MONEY,
      - 186        TokenType.SMALLMONEY,
      - 187        TokenType.ROWVERSION,
      - 188        TokenType.IMAGE,
      - 189        TokenType.VARIANT,
      - 190        TokenType.OBJECT,
      - 191        TokenType.INET,
      - 192        *NESTED_TYPE_TOKENS,
      - 193    }
      - 194
      - 195    SUBQUERY_PREDICATES = {
      - 196        TokenType.ANY: exp.Any,
      - 197        TokenType.ALL: exp.All,
      - 198        TokenType.EXISTS: exp.Exists,
      - 199        TokenType.SOME: exp.Any,
      - 200    }
      - 201
      - 202    RESERVED_KEYWORDS = {*Tokenizer.SINGLE_TOKENS.values(), TokenType.SELECT}
+  60class Parser(metaclass=_Parser):
      +  61    """
      +  62    Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
      +  63
      +  64    Args:
      +  65        error_level: The desired error level.
      +  66            Default: ErrorLevel.IMMEDIATE
      +  67        error_message_context: Determines the amount of context to capture from a
      +  68            query string when displaying the error message (in number of characters).
      +  69            Default: 100
      +  70        max_errors: Maximum number of error messages to include in a raised ParseError.
      +  71            This is only relevant if error_level is ErrorLevel.RAISE.
      +  72            Default: 3
      +  73    """
      +  74
      +  75    FUNCTIONS: t.Dict[str, t.Callable] = {
      +  76        **{name: f.from_arg_list for f in exp.ALL_FUNCTIONS for name in f.sql_names()},
      +  77        "DATE_TO_DATE_STR": lambda args: exp.Cast(
      +  78            this=seq_get(args, 0),
      +  79            to=exp.DataType(this=exp.DataType.Type.TEXT),
      +  80        ),
      +  81        "GLOB": lambda args: exp.Glob(this=seq_get(args, 1), expression=seq_get(args, 0)),
      +  82        "LIKE": parse_like,
      +  83        "TIME_TO_TIME_STR": lambda args: exp.Cast(
      +  84            this=seq_get(args, 0),
      +  85            to=exp.DataType(this=exp.DataType.Type.TEXT),
      +  86        ),
      +  87        "TS_OR_DS_TO_DATE_STR": lambda args: exp.Substring(
      +  88            this=exp.Cast(
      +  89                this=seq_get(args, 0),
      +  90                to=exp.DataType(this=exp.DataType.Type.TEXT),
      +  91            ),
      +  92            start=exp.Literal.number(1),
      +  93            length=exp.Literal.number(10),
      +  94        ),
      +  95        "VAR_MAP": parse_var_map,
      +  96    }
      +  97
      +  98    NO_PAREN_FUNCTIONS = {
      +  99        TokenType.CURRENT_DATE: exp.CurrentDate,
      + 100        TokenType.CURRENT_DATETIME: exp.CurrentDate,
      + 101        TokenType.CURRENT_TIME: exp.CurrentTime,
      + 102        TokenType.CURRENT_TIMESTAMP: exp.CurrentTimestamp,
      + 103        TokenType.CURRENT_USER: exp.CurrentUser,
      + 104    }
      + 105
      + 106    NESTED_TYPE_TOKENS = {
      + 107        TokenType.ARRAY,
      + 108        TokenType.MAP,
      + 109        TokenType.NULLABLE,
      + 110        TokenType.STRUCT,
      + 111    }
      + 112
      + 113    ENUM_TYPE_TOKENS = {
      + 114        TokenType.ENUM,
      + 115    }
      + 116
      + 117    TYPE_TOKENS = {
      + 118        TokenType.BIT,
      + 119        TokenType.BOOLEAN,
      + 120        TokenType.TINYINT,
      + 121        TokenType.UTINYINT,
      + 122        TokenType.SMALLINT,
      + 123        TokenType.USMALLINT,
      + 124        TokenType.INT,
      + 125        TokenType.UINT,
      + 126        TokenType.BIGINT,
      + 127        TokenType.UBIGINT,
      + 128        TokenType.INT128,
      + 129        TokenType.UINT128,
      + 130        TokenType.INT256,
      + 131        TokenType.UINT256,
      + 132        TokenType.FLOAT,
      + 133        TokenType.DOUBLE,
      + 134        TokenType.CHAR,
      + 135        TokenType.NCHAR,
      + 136        TokenType.VARCHAR,
      + 137        TokenType.NVARCHAR,
      + 138        TokenType.TEXT,
      + 139        TokenType.MEDIUMTEXT,
      + 140        TokenType.LONGTEXT,
      + 141        TokenType.MEDIUMBLOB,
      + 142        TokenType.LONGBLOB,
      + 143        TokenType.BINARY,
      + 144        TokenType.VARBINARY,
      + 145        TokenType.JSON,
      + 146        TokenType.JSONB,
      + 147        TokenType.INTERVAL,
      + 148        TokenType.TIME,
      + 149        TokenType.TIMESTAMP,
      + 150        TokenType.TIMESTAMPTZ,
      + 151        TokenType.TIMESTAMPLTZ,
      + 152        TokenType.DATETIME,
      + 153        TokenType.DATETIME64,
      + 154        TokenType.DATE,
      + 155        TokenType.INT4RANGE,
      + 156        TokenType.INT4MULTIRANGE,
      + 157        TokenType.INT8RANGE,
      + 158        TokenType.INT8MULTIRANGE,
      + 159        TokenType.NUMRANGE,
      + 160        TokenType.NUMMULTIRANGE,
      + 161        TokenType.TSRANGE,
      + 162        TokenType.TSMULTIRANGE,
      + 163        TokenType.TSTZRANGE,
      + 164        TokenType.TSTZMULTIRANGE,
      + 165        TokenType.DATERANGE,
      + 166        TokenType.DATEMULTIRANGE,
      + 167        TokenType.DECIMAL,
      + 168        TokenType.BIGDECIMAL,
      + 169        TokenType.UUID,
      + 170        TokenType.GEOGRAPHY,
      + 171        TokenType.GEOMETRY,
      + 172        TokenType.HLLSKETCH,
      + 173        TokenType.HSTORE,
      + 174        TokenType.PSEUDO_TYPE,
      + 175        TokenType.SUPER,
      + 176        TokenType.SERIAL,
      + 177        TokenType.SMALLSERIAL,
      + 178        TokenType.BIGSERIAL,
      + 179        TokenType.XML,
      + 180        TokenType.UNIQUEIDENTIFIER,
      + 181        TokenType.MONEY,
      + 182        TokenType.SMALLMONEY,
      + 183        TokenType.ROWVERSION,
      + 184        TokenType.IMAGE,
      + 185        TokenType.VARIANT,
      + 186        TokenType.OBJECT,
      + 187        TokenType.INET,
      + 188        TokenType.ENUM,
      + 189        *NESTED_TYPE_TOKENS,
      + 190    }
      + 191
      + 192    SUBQUERY_PREDICATES = {
      + 193        TokenType.ANY: exp.Any,
      + 194        TokenType.ALL: exp.All,
      + 195        TokenType.EXISTS: exp.Exists,
      + 196        TokenType.SOME: exp.Any,
      + 197    }
      + 198
      + 199    RESERVED_KEYWORDS = {
      + 200        *Tokenizer.SINGLE_TOKENS.values(),
      + 201        TokenType.SELECT,
      + 202    }
        203
        204    DB_CREATABLES = {
        205        TokenType.DATABASE,
      @@ -5013,2751 +5017,2751 @@
        217        *DB_CREATABLES,
        218    }
        219
      - 220    ID_VAR_TOKENS = {
      - 221        TokenType.VAR,
      - 222        TokenType.ANTI,
      - 223        TokenType.APPLY,
      - 224        TokenType.ASC,
      - 225        TokenType.AUTO_INCREMENT,
      - 226        TokenType.BEGIN,
      - 227        TokenType.CACHE,
      - 228        TokenType.COLLATE,
      - 229        TokenType.COMMAND,
      - 230        TokenType.COMMENT,
      - 231        TokenType.COMMIT,
      - 232        TokenType.CONSTRAINT,
      - 233        TokenType.DEFAULT,
      - 234        TokenType.DELETE,
      - 235        TokenType.DESC,
      - 236        TokenType.DESCRIBE,
      - 237        TokenType.DICTIONARY,
      - 238        TokenType.DIV,
      - 239        TokenType.END,
      - 240        TokenType.EXECUTE,
      - 241        TokenType.ESCAPE,
      - 242        TokenType.FALSE,
      - 243        TokenType.FIRST,
      - 244        TokenType.FILTER,
      - 245        TokenType.FORMAT,
      - 246        TokenType.FULL,
      - 247        TokenType.IF,
      - 248        TokenType.IS,
      - 249        TokenType.ISNULL,
      - 250        TokenType.INTERVAL,
      - 251        TokenType.KEEP,
      - 252        TokenType.LEFT,
      - 253        TokenType.LOAD,
      - 254        TokenType.MERGE,
      - 255        TokenType.NATURAL,
      - 256        TokenType.NEXT,
      - 257        TokenType.OFFSET,
      - 258        TokenType.ORDINALITY,
      - 259        TokenType.OVERWRITE,
      - 260        TokenType.PARTITION,
      - 261        TokenType.PERCENT,
      - 262        TokenType.PIVOT,
      - 263        TokenType.PRAGMA,
      - 264        TokenType.RANGE,
      - 265        TokenType.REFERENCES,
      - 266        TokenType.RIGHT,
      - 267        TokenType.ROW,
      - 268        TokenType.ROWS,
      - 269        TokenType.SEMI,
      - 270        TokenType.SET,
      - 271        TokenType.SETTINGS,
      - 272        TokenType.SHOW,
      - 273        TokenType.TEMPORARY,
      - 274        TokenType.TOP,
      - 275        TokenType.TRUE,
      - 276        TokenType.UNIQUE,
      - 277        TokenType.UNPIVOT,
      - 278        TokenType.VOLATILE,
      - 279        TokenType.WINDOW,
      - 280        *CREATABLES,
      - 281        *SUBQUERY_PREDICATES,
      - 282        *TYPE_TOKENS,
      - 283        *NO_PAREN_FUNCTIONS,
      - 284    }
      - 285
      - 286    INTERVAL_VARS = ID_VAR_TOKENS - {TokenType.END}
      - 287
      - 288    TABLE_ALIAS_TOKENS = ID_VAR_TOKENS - {
      - 289        TokenType.APPLY,
      - 290        TokenType.ASOF,
      - 291        TokenType.FULL,
      - 292        TokenType.LEFT,
      - 293        TokenType.LOCK,
      - 294        TokenType.NATURAL,
      - 295        TokenType.OFFSET,
      - 296        TokenType.RIGHT,
      - 297        TokenType.WINDOW,
      - 298    }
      - 299
      - 300    COMMENT_TABLE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.IS}
      - 301
      - 302    UPDATE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.SET}
      - 303
      - 304    TRIM_TYPES = {"LEADING", "TRAILING", "BOTH"}
      - 305
      - 306    FUNC_TOKENS = {
      - 307        TokenType.COMMAND,
      - 308        TokenType.CURRENT_DATE,
      - 309        TokenType.CURRENT_DATETIME,
      - 310        TokenType.CURRENT_TIMESTAMP,
      - 311        TokenType.CURRENT_TIME,
      - 312        TokenType.CURRENT_USER,
      - 313        TokenType.FILTER,
      - 314        TokenType.FIRST,
      - 315        TokenType.FORMAT,
      - 316        TokenType.GLOB,
      - 317        TokenType.IDENTIFIER,
      - 318        TokenType.INDEX,
      - 319        TokenType.ISNULL,
      - 320        TokenType.ILIKE,
      - 321        TokenType.LIKE,
      - 322        TokenType.MERGE,
      - 323        TokenType.OFFSET,
      - 324        TokenType.PRIMARY_KEY,
      - 325        TokenType.RANGE,
      - 326        TokenType.REPLACE,
      - 327        TokenType.ROW,
      - 328        TokenType.UNNEST,
      - 329        TokenType.VAR,
      - 330        TokenType.LEFT,
      - 331        TokenType.RIGHT,
      - 332        TokenType.DATE,
      - 333        TokenType.DATETIME,
      - 334        TokenType.TABLE,
      - 335        TokenType.TIMESTAMP,
      - 336        TokenType.TIMESTAMPTZ,
      - 337        TokenType.WINDOW,
      - 338        *TYPE_TOKENS,
      - 339        *SUBQUERY_PREDICATES,
      - 340    }
      - 341
      - 342    CONJUNCTION = {
      - 343        TokenType.AND: exp.And,
      - 344        TokenType.OR: exp.Or,
      - 345    }
      - 346
      - 347    EQUALITY = {
      - 348        TokenType.EQ: exp.EQ,
      - 349        TokenType.NEQ: exp.NEQ,
      - 350        TokenType.NULLSAFE_EQ: exp.NullSafeEQ,
      - 351    }
      - 352
      - 353    COMPARISON = {
      - 354        TokenType.GT: exp.GT,
      - 355        TokenType.GTE: exp.GTE,
      - 356        TokenType.LT: exp.LT,
      - 357        TokenType.LTE: exp.LTE,
      - 358    }
      - 359
      - 360    BITWISE = {
      - 361        TokenType.AMP: exp.BitwiseAnd,
      - 362        TokenType.CARET: exp.BitwiseXor,
      - 363        TokenType.PIPE: exp.BitwiseOr,
      - 364        TokenType.DPIPE: exp.DPipe,
      - 365    }
      - 366
      - 367    TERM = {
      - 368        TokenType.DASH: exp.Sub,
      - 369        TokenType.PLUS: exp.Add,
      - 370        TokenType.MOD: exp.Mod,
      - 371        TokenType.COLLATE: exp.Collate,
      - 372    }
      - 373
      - 374    FACTOR = {
      - 375        TokenType.DIV: exp.IntDiv,
      - 376        TokenType.LR_ARROW: exp.Distance,
      - 377        TokenType.SLASH: exp.Div,
      - 378        TokenType.STAR: exp.Mul,
      - 379    }
      - 380
      - 381    TIMESTAMPS = {
      - 382        TokenType.TIME,
      - 383        TokenType.TIMESTAMP,
      - 384        TokenType.TIMESTAMPTZ,
      - 385        TokenType.TIMESTAMPLTZ,
      - 386    }
      - 387
      - 388    SET_OPERATIONS = {
      - 389        TokenType.UNION,
      - 390        TokenType.INTERSECT,
      - 391        TokenType.EXCEPT,
      - 392    }
      - 393
      - 394    JOIN_METHODS = {
      - 395        TokenType.NATURAL,
      - 396        TokenType.ASOF,
      - 397    }
      - 398
      - 399    JOIN_SIDES = {
      - 400        TokenType.LEFT,
      - 401        TokenType.RIGHT,
      - 402        TokenType.FULL,
      - 403    }
      - 404
      - 405    JOIN_KINDS = {
      - 406        TokenType.INNER,
      - 407        TokenType.OUTER,
      - 408        TokenType.CROSS,
      - 409        TokenType.SEMI,
      - 410        TokenType.ANTI,
      - 411    }
      - 412
      - 413    LAMBDAS = {
      - 414        TokenType.ARROW: lambda self, expressions: self.expression(
      - 415            exp.Lambda,
      - 416            this=self._replace_lambda(
      - 417                self._parse_conjunction(),
      - 418                {node.name for node in expressions},
      - 419            ),
      - 420            expressions=expressions,
      - 421        ),
      - 422        TokenType.FARROW: lambda self, expressions: self.expression(
      - 423            exp.Kwarg,
      - 424            this=exp.Var(this=expressions[0].name),
      - 425            expression=self._parse_conjunction(),
      + 220    # Tokens that can represent identifiers
      + 221    ID_VAR_TOKENS = {
      + 222        TokenType.VAR,
      + 223        TokenType.ANTI,
      + 224        TokenType.APPLY,
      + 225        TokenType.ASC,
      + 226        TokenType.AUTO_INCREMENT,
      + 227        TokenType.BEGIN,
      + 228        TokenType.CACHE,
      + 229        TokenType.CASE,
      + 230        TokenType.COLLATE,
      + 231        TokenType.COMMAND,
      + 232        TokenType.COMMENT,
      + 233        TokenType.COMMIT,
      + 234        TokenType.CONSTRAINT,
      + 235        TokenType.DEFAULT,
      + 236        TokenType.DELETE,
      + 237        TokenType.DESC,
      + 238        TokenType.DESCRIBE,
      + 239        TokenType.DICTIONARY,
      + 240        TokenType.DIV,
      + 241        TokenType.END,
      + 242        TokenType.EXECUTE,
      + 243        TokenType.ESCAPE,
      + 244        TokenType.FALSE,
      + 245        TokenType.FIRST,
      + 246        TokenType.FILTER,
      + 247        TokenType.FORMAT,
      + 248        TokenType.FULL,
      + 249        TokenType.IF,
      + 250        TokenType.IS,
      + 251        TokenType.ISNULL,
      + 252        TokenType.INTERVAL,
      + 253        TokenType.KEEP,
      + 254        TokenType.LEFT,
      + 255        TokenType.LOAD,
      + 256        TokenType.MERGE,
      + 257        TokenType.NATURAL,
      + 258        TokenType.NEXT,
      + 259        TokenType.OFFSET,
      + 260        TokenType.ORDINALITY,
      + 261        TokenType.OVERWRITE,
      + 262        TokenType.PARTITION,
      + 263        TokenType.PERCENT,
      + 264        TokenType.PIVOT,
      + 265        TokenType.PRAGMA,
      + 266        TokenType.RANGE,
      + 267        TokenType.REFERENCES,
      + 268        TokenType.RIGHT,
      + 269        TokenType.ROW,
      + 270        TokenType.ROWS,
      + 271        TokenType.SEMI,
      + 272        TokenType.SET,
      + 273        TokenType.SETTINGS,
      + 274        TokenType.SHOW,
      + 275        TokenType.TEMPORARY,
      + 276        TokenType.TOP,
      + 277        TokenType.TRUE,
      + 278        TokenType.UNIQUE,
      + 279        TokenType.UNPIVOT,
      + 280        TokenType.UPDATE,
      + 281        TokenType.VOLATILE,
      + 282        TokenType.WINDOW,
      + 283        *CREATABLES,
      + 284        *SUBQUERY_PREDICATES,
      + 285        *TYPE_TOKENS,
      + 286        *NO_PAREN_FUNCTIONS,
      + 287    }
      + 288
      + 289    INTERVAL_VARS = ID_VAR_TOKENS - {TokenType.END}
      + 290
      + 291    TABLE_ALIAS_TOKENS = ID_VAR_TOKENS - {
      + 292        TokenType.APPLY,
      + 293        TokenType.ASOF,
      + 294        TokenType.FULL,
      + 295        TokenType.LEFT,
      + 296        TokenType.LOCK,
      + 297        TokenType.NATURAL,
      + 298        TokenType.OFFSET,
      + 299        TokenType.RIGHT,
      + 300        TokenType.WINDOW,
      + 301    }
      + 302
      + 303    COMMENT_TABLE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.IS}
      + 304
      + 305    UPDATE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.SET}
      + 306
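The alias sets above are plain set algebra over ID_VAR_TOKENS. As a hedged illustration of the practical effect (the SQL strings below are my own examples, not part of the patch): keyword-like tokens kept in ID_VAR_TOKENS still parse as names, while the join keywords subtracted from TABLE_ALIAS_TOKENS are never mistaken for table aliases.

import sqlglot

# FORMAT is a keyword token, but it is in ID_VAR_TOKENS, so it works as an alias.
print(sqlglot.parse_one("SELECT c AS format FROM t").sql())

# LEFT is excluded from TABLE_ALIAS_TOKENS, so it is read as the start of a join,
# not as an alias for table t.
print(sqlglot.parse_one("SELECT * FROM t LEFT JOIN u ON t.id = u.id").sql())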
      + 307    TRIM_TYPES = {"LEADING", "TRAILING", "BOTH"}
      + 308
      + 309    FUNC_TOKENS = {
      + 310        TokenType.COMMAND,
      + 311        TokenType.CURRENT_DATE,
      + 312        TokenType.CURRENT_DATETIME,
      + 313        TokenType.CURRENT_TIMESTAMP,
      + 314        TokenType.CURRENT_TIME,
      + 315        TokenType.CURRENT_USER,
      + 316        TokenType.FILTER,
      + 317        TokenType.FIRST,
      + 318        TokenType.FORMAT,
      + 319        TokenType.GLOB,
      + 320        TokenType.IDENTIFIER,
      + 321        TokenType.INDEX,
      + 322        TokenType.ISNULL,
      + 323        TokenType.ILIKE,
      + 324        TokenType.LIKE,
      + 325        TokenType.MERGE,
      + 326        TokenType.OFFSET,
      + 327        TokenType.PRIMARY_KEY,
      + 328        TokenType.RANGE,
      + 329        TokenType.REPLACE,
      + 330        TokenType.ROW,
      + 331        TokenType.UNNEST,
      + 332        TokenType.VAR,
      + 333        TokenType.LEFT,
      + 334        TokenType.RIGHT,
      + 335        TokenType.DATE,
      + 336        TokenType.DATETIME,
      + 337        TokenType.TABLE,
      + 338        TokenType.TIMESTAMP,
      + 339        TokenType.TIMESTAMPTZ,
      + 340        TokenType.WINDOW,
      + 341        *TYPE_TOKENS,
      + 342        *SUBQUERY_PREDICATES,
      + 343    }
      + 344
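FUNC_TOKENS lists the tokens that may be followed by an argument list and parsed as a function call. A hedged example (my own snippet) of why keyword tokens such as LEFT and RIGHT appear both here and in the join-side sets:

import sqlglot

# LEFT is a join keyword, but because it is also in FUNC_TOKENS it parses here
# as an ordinary function call over (name, 3).
print(repr(sqlglot.parse_one("SELECT LEFT(name, 3) FROM t")))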
      + 345    CONJUNCTION = {
      + 346        TokenType.AND: exp.And,
      + 347        TokenType.OR: exp.Or,
      + 348    }
      + 349
      + 350    EQUALITY = {
      + 351        TokenType.EQ: exp.EQ,
      + 352        TokenType.NEQ: exp.NEQ,
      + 353        TokenType.NULLSAFE_EQ: exp.NullSafeEQ,
      + 354    }
      + 355
      + 356    COMPARISON = {
      + 357        TokenType.GT: exp.GT,
      + 358        TokenType.GTE: exp.GTE,
      + 359        TokenType.LT: exp.LT,
      + 360        TokenType.LTE: exp.LTE,
      + 361    }
      + 362
      + 363    BITWISE = {
      + 364        TokenType.AMP: exp.BitwiseAnd,
      + 365        TokenType.CARET: exp.BitwiseXor,
      + 366        TokenType.PIPE: exp.BitwiseOr,
      + 367        TokenType.DPIPE: exp.DPipe,
      + 368    }
      + 369
      + 370    TERM = {
      + 371        TokenType.DASH: exp.Sub,
      + 372        TokenType.PLUS: exp.Add,
      + 373        TokenType.MOD: exp.Mod,
      + 374        TokenType.COLLATE: exp.Collate,
      + 375    }
      + 376
      + 377    FACTOR = {
      + 378        TokenType.DIV: exp.IntDiv,
      + 379        TokenType.LR_ARROW: exp.Distance,
      + 380        TokenType.SLASH: exp.Div,
      + 381        TokenType.STAR: exp.Mul,
      + 382    }
      + 383
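CONJUNCTION, EQUALITY, COMPARISON, BITWISE, TERM and FACTOR map operator tokens to expression classes for the precedence-climbing part of the parser; FACTOR binds tighter than TERM, which binds tighter than the comparison and boolean levels. A minimal hedged check (my own snippet):

import sqlglot

tree = sqlglot.parse_one("SELECT 1 + 2 * 3")
# Mul is nested under Add because FACTOR operators bind tighter than TERM operators.
print(repr(tree.expressions[0]))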
      + 384    TIMESTAMPS = {
      + 385        TokenType.TIME,
      + 386        TokenType.TIMESTAMP,
      + 387        TokenType.TIMESTAMPTZ,
      + 388        TokenType.TIMESTAMPLTZ,
      + 389    }
      + 390
      + 391    SET_OPERATIONS = {
      + 392        TokenType.UNION,
      + 393        TokenType.INTERSECT,
      + 394        TokenType.EXCEPT,
      + 395    }
      + 396
      + 397    JOIN_METHODS = {
      + 398        TokenType.NATURAL,
      + 399        TokenType.ASOF,
      + 400    }
      + 401
      + 402    JOIN_SIDES = {
      + 403        TokenType.LEFT,
      + 404        TokenType.RIGHT,
      + 405        TokenType.FULL,
      + 406    }
      + 407
      + 408    JOIN_KINDS = {
      + 409        TokenType.INNER,
      + 410        TokenType.OUTER,
      + 411        TokenType.CROSS,
      + 412        TokenType.SEMI,
      + 413        TokenType.ANTI,
      + 414    }
      + 415
      + 416    JOIN_HINTS: t.Set[str] = set()
      + 417
      + 418    LAMBDAS = {
      + 419        TokenType.ARROW: lambda self, expressions: self.expression(
      + 420            exp.Lambda,
      + 421            this=self._replace_lambda(
      + 422                self._parse_conjunction(),
      + 423                {node.name for node in expressions},
      + 424            ),
      + 425            expressions=expressions,
        426        ),
      - 427    }
      - 428
      - 429    COLUMN_OPERATORS = {
      - 430        TokenType.DOT: None,
      - 431        TokenType.DCOLON: lambda self, this, to: self.expression(
      - 432            exp.Cast if self.STRICT_CAST else exp.TryCast,
      - 433            this=this,
      - 434            to=to,
      - 435        ),
      - 436        TokenType.ARROW: lambda self, this, path: self.expression(
      - 437            exp.JSONExtract,
      + 427        TokenType.FARROW: lambda self, expressions: self.expression(
      + 428            exp.Kwarg,
      + 429            this=exp.var(expressions[0].name),
      + 430            expression=self._parse_conjunction(),
      + 431        ),
      + 432    }
      + 433
      + 434    COLUMN_OPERATORS = {
      + 435        TokenType.DOT: None,
      + 436        TokenType.DCOLON: lambda self, this, to: self.expression(
      + 437            exp.Cast if self.STRICT_CAST else exp.TryCast,
        438            this=this,
      - 439            expression=path,
      + 439            to=to,
        440        ),
      - 441        TokenType.DARROW: lambda self, this, path: self.expression(
      - 442            exp.JSONExtractScalar,
      + 441        TokenType.ARROW: lambda self, this, path: self.expression(
      + 442            exp.JSONExtract,
        443            this=this,
        444            expression=path,
        445        ),
      - 446        TokenType.HASH_ARROW: lambda self, this, path: self.expression(
      - 447            exp.JSONBExtract,
      + 446        TokenType.DARROW: lambda self, this, path: self.expression(
      + 447            exp.JSONExtractScalar,
        448            this=this,
        449            expression=path,
        450        ),
      - 451        TokenType.DHASH_ARROW: lambda self, this, path: self.expression(
      - 452            exp.JSONBExtractScalar,
      + 451        TokenType.HASH_ARROW: lambda self, this, path: self.expression(
      + 452            exp.JSONBExtract,
        453            this=this,
        454            expression=path,
        455        ),
      - 456        TokenType.PLACEHOLDER: lambda self, this, key: self.expression(
      - 457            exp.JSONBContains,
      + 456        TokenType.DHASH_ARROW: lambda self, this, path: self.expression(
      + 457            exp.JSONBExtractScalar,
        458            this=this,
      - 459            expression=key,
      + 459            expression=path,
        460        ),
      - 461    }
      - 462
      - 463    EXPRESSION_PARSERS = {
      - 464        exp.Cluster: lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"),
      - 465        exp.Column: lambda self: self._parse_column(),
      - 466        exp.Condition: lambda self: self._parse_conjunction(),
      - 467        exp.DataType: lambda self: self._parse_types(),
      - 468        exp.Expression: lambda self: self._parse_statement(),
      - 469        exp.From: lambda self: self._parse_from(),
      - 470        exp.Group: lambda self: self._parse_group(),
      - 471        exp.Having: lambda self: self._parse_having(),
      - 472        exp.Identifier: lambda self: self._parse_id_var(),
      - 473        exp.Join: lambda self: self._parse_join(),
      - 474        exp.Lambda: lambda self: self._parse_lambda(),
      - 475        exp.Lateral: lambda self: self._parse_lateral(),
      - 476        exp.Limit: lambda self: self._parse_limit(),
      - 477        exp.Offset: lambda self: self._parse_offset(),
      - 478        exp.Order: lambda self: self._parse_order(),
      - 479        exp.Ordered: lambda self: self._parse_ordered(),
      - 480        exp.Properties: lambda self: self._parse_properties(),
      - 481        exp.Qualify: lambda self: self._parse_qualify(),
      - 482        exp.Returning: lambda self: self._parse_returning(),
      - 483        exp.Sort: lambda self: self._parse_sort(exp.Sort, "SORT", "BY"),
      - 484        exp.Table: lambda self: self._parse_table_parts(),
      - 485        exp.TableAlias: lambda self: self._parse_table_alias(),
      - 486        exp.Where: lambda self: self._parse_where(),
      - 487        exp.Window: lambda self: self._parse_named_window(),
      - 488        exp.With: lambda self: self._parse_with(),
      - 489        "JOIN_TYPE": lambda self: self._parse_join_parts(),
      - 490    }
      - 491
      - 492    STATEMENT_PARSERS = {
      - 493        TokenType.ALTER: lambda self: self._parse_alter(),
      - 494        TokenType.BEGIN: lambda self: self._parse_transaction(),
      - 495        TokenType.CACHE: lambda self: self._parse_cache(),
      - 496        TokenType.COMMIT: lambda self: self._parse_commit_or_rollback(),
      - 497        TokenType.COMMENT: lambda self: self._parse_comment(),
      - 498        TokenType.CREATE: lambda self: self._parse_create(),
      - 499        TokenType.DELETE: lambda self: self._parse_delete(),
      - 500        TokenType.DESC: lambda self: self._parse_describe(),
      - 501        TokenType.DESCRIBE: lambda self: self._parse_describe(),
      - 502        TokenType.DROP: lambda self: self._parse_drop(),
      - 503        TokenType.END: lambda self: self._parse_commit_or_rollback(),
      - 504        TokenType.FROM: lambda self: exp.select("*").from_(
      - 505            t.cast(exp.From, self._parse_from(skip_from_token=True))
      - 506        ),
      - 507        TokenType.INSERT: lambda self: self._parse_insert(),
      - 508        TokenType.LOAD: lambda self: self._parse_load(),
      - 509        TokenType.MERGE: lambda self: self._parse_merge(),
      - 510        TokenType.PIVOT: lambda self: self._parse_simplified_pivot(),
      - 511        TokenType.PRAGMA: lambda self: self.expression(exp.Pragma, this=self._parse_expression()),
      - 512        TokenType.ROLLBACK: lambda self: self._parse_commit_or_rollback(),
      - 513        TokenType.SET: lambda self: self._parse_set(),
      - 514        TokenType.UNCACHE: lambda self: self._parse_uncache(),
      - 515        TokenType.UPDATE: lambda self: self._parse_update(),
      - 516        TokenType.USE: lambda self: self.expression(
      - 517            exp.Use,
      - 518            kind=self._match_texts(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"))
      - 519            and exp.Var(this=self._prev.text),
      - 520            this=self._parse_table(schema=False),
      - 521        ),
      - 522    }
      - 523
      - 524    UNARY_PARSERS = {
      - 525        TokenType.PLUS: lambda self: self._parse_unary(),  # Unary + is handled as a no-op
      - 526        TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()),
      - 527        TokenType.TILDA: lambda self: self.expression(exp.BitwiseNot, this=self._parse_unary()),
      - 528        TokenType.DASH: lambda self: self.expression(exp.Neg, this=self._parse_unary()),
      - 529    }
      - 530
      - 531    PRIMARY_PARSERS = {
      - 532        TokenType.STRING: lambda self, token: self.expression(
      - 533            exp.Literal, this=token.text, is_string=True
      - 534        ),
      - 535        TokenType.NUMBER: lambda self, token: self.expression(
      - 536            exp.Literal, this=token.text, is_string=False
      - 537        ),
      - 538        TokenType.STAR: lambda self, _: self.expression(
      - 539            exp.Star,
      - 540            **{"except": self._parse_except(), "replace": self._parse_replace()},
      - 541        ),
      - 542        TokenType.NULL: lambda self, _: self.expression(exp.Null),
      - 543        TokenType.TRUE: lambda self, _: self.expression(exp.Boolean, this=True),
      - 544        TokenType.FALSE: lambda self, _: self.expression(exp.Boolean, this=False),
      - 545        TokenType.BIT_STRING: lambda self, token: self.expression(exp.BitString, this=token.text),
      - 546        TokenType.HEX_STRING: lambda self, token: self.expression(exp.HexString, this=token.text),
      - 547        TokenType.BYTE_STRING: lambda self, token: self.expression(exp.ByteString, this=token.text),
      - 548        TokenType.INTRODUCER: lambda self, token: self._parse_introducer(token),
      - 549        TokenType.NATIONAL_STRING: lambda self, token: self.expression(
      - 550            exp.National, this=token.text
      - 551        ),
      - 552        TokenType.RAW_STRING: lambda self, token: self.expression(exp.RawString, this=token.text),
      - 553        TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
      - 554    }
      - 555
      - 556    PLACEHOLDER_PARSERS = {
      - 557        TokenType.PLACEHOLDER: lambda self: self.expression(exp.Placeholder),
      - 558        TokenType.PARAMETER: lambda self: self._parse_parameter(),
      - 559        TokenType.COLON: lambda self: self.expression(exp.Placeholder, this=self._prev.text)
      - 560        if self._match_set((TokenType.NUMBER, TokenType.VAR))
      - 561        else None,
      - 562    }
      - 563
      - 564    RANGE_PARSERS = {
      - 565        TokenType.BETWEEN: lambda self, this: self._parse_between(this),
      - 566        TokenType.GLOB: binary_range_parser(exp.Glob),
      - 567        TokenType.ILIKE: binary_range_parser(exp.ILike),
      - 568        TokenType.IN: lambda self, this: self._parse_in(this),
      - 569        TokenType.IRLIKE: binary_range_parser(exp.RegexpILike),
      - 570        TokenType.IS: lambda self, this: self._parse_is(this),
      - 571        TokenType.LIKE: binary_range_parser(exp.Like),
      - 572        TokenType.OVERLAPS: binary_range_parser(exp.Overlaps),
      - 573        TokenType.RLIKE: binary_range_parser(exp.RegexpLike),
      - 574        TokenType.SIMILAR_TO: binary_range_parser(exp.SimilarTo),
      - 575    }
      - 576
      - 577    PROPERTY_PARSERS: t.Dict[str, t.Callable] = {
      - 578        "ALGORITHM": lambda self: self._parse_property_assignment(exp.AlgorithmProperty),
      - 579        "AUTO_INCREMENT": lambda self: self._parse_property_assignment(exp.AutoIncrementProperty),
      - 580        "BLOCKCOMPRESSION": lambda self: self._parse_blockcompression(),
      - 581        "CHARACTER SET": lambda self: self._parse_character_set(),
      - 582        "CHECKSUM": lambda self: self._parse_checksum(),
      - 583        "CLUSTER": lambda self: self._parse_cluster(),
      - 584        "COLLATE": lambda self: self._parse_property_assignment(exp.CollateProperty),
      - 585        "COMMENT": lambda self: self._parse_property_assignment(exp.SchemaCommentProperty),
      - 586        "DATABLOCKSIZE": lambda self, **kwargs: self._parse_datablocksize(**kwargs),
      - 587        "DEFINER": lambda self: self._parse_definer(),
      - 588        "DETERMINISTIC": lambda self: self.expression(
      - 589            exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE")
      - 590        ),
      - 591        "DISTKEY": lambda self: self._parse_distkey(),
      - 592        "DISTSTYLE": lambda self: self._parse_property_assignment(exp.DistStyleProperty),
      - 593        "ENGINE": lambda self: self._parse_property_assignment(exp.EngineProperty),
      - 594        "EXECUTE": lambda self: self._parse_property_assignment(exp.ExecuteAsProperty),
      - 595        "EXTERNAL": lambda self: self.expression(exp.ExternalProperty),
      - 596        "FALLBACK": lambda self, **kwargs: self._parse_fallback(**kwargs),
      - 597        "FORMAT": lambda self: self._parse_property_assignment(exp.FileFormatProperty),
      - 598        "FREESPACE": lambda self: self._parse_freespace(),
      - 599        "IMMUTABLE": lambda self: self.expression(
      - 600            exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE")
      - 601        ),
      - 602        "JOURNAL": lambda self, **kwargs: self._parse_journal(**kwargs),
      - 603        "LANGUAGE": lambda self: self._parse_property_assignment(exp.LanguageProperty),
      - 604        "LAYOUT": lambda self: self._parse_dict_property(this="LAYOUT"),
      - 605        "LIFETIME": lambda self: self._parse_dict_range(this="LIFETIME"),
      - 606        "LIKE": lambda self: self._parse_create_like(),
      - 607        "LOCATION": lambda self: self._parse_property_assignment(exp.LocationProperty),
      - 608        "LOCK": lambda self: self._parse_locking(),
      - 609        "LOCKING": lambda self: self._parse_locking(),
      - 610        "LOG": lambda self, **kwargs: self._parse_log(**kwargs),
      - 611        "MATERIALIZED": lambda self: self.expression(exp.MaterializedProperty),
      - 612        "MERGEBLOCKRATIO": lambda self, **kwargs: self._parse_mergeblockratio(**kwargs),
      - 613        "MULTISET": lambda self: self.expression(exp.SetProperty, multi=True),
      - 614        "NO": lambda self: self._parse_no_property(),
      - 615        "ON": lambda self: self._parse_on_property(),
      - 616        "ORDER BY": lambda self: self._parse_order(skip_order_token=True),
      - 617        "PARTITION BY": lambda self: self._parse_partitioned_by(),
      - 618        "PARTITIONED BY": lambda self: self._parse_partitioned_by(),
      - 619        "PARTITIONED_BY": lambda self: self._parse_partitioned_by(),
      - 620        "PRIMARY KEY": lambda self: self._parse_primary_key(in_props=True),
      - 621        "RANGE": lambda self: self._parse_dict_range(this="RANGE"),
      - 622        "RETURNS": lambda self: self._parse_returns(),
      - 623        "ROW": lambda self: self._parse_row(),
      - 624        "ROW_FORMAT": lambda self: self._parse_property_assignment(exp.RowFormatProperty),
      - 625        "SET": lambda self: self.expression(exp.SetProperty, multi=False),
      - 626        "SETTINGS": lambda self: self.expression(
      - 627            exp.SettingsProperty, expressions=self._parse_csv(self._parse_set_item)
      - 628        ),
      - 629        "SORTKEY": lambda self: self._parse_sortkey(),
      - 630        "SOURCE": lambda self: self._parse_dict_property(this="SOURCE"),
      - 631        "STABLE": lambda self: self.expression(
      - 632            exp.StabilityProperty, this=exp.Literal.string("STABLE")
      + 461        TokenType.PLACEHOLDER: lambda self, this, key: self.expression(
      + 462            exp.JSONBContains,
      + 463            this=this,
      + 464            expression=key,
      + 465        ),
      + 466    }
      + 467
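COLUMN_OPERATORS maps postfix column operators to builder callbacks: `::` produces a Cast (or a TryCast when STRICT_CAST is disabled) and the arrow family produces JSON/JSONB extraction nodes. A hedged example; the choice of the postgres dialect and the column names are mine, not dictated by the patch:

import sqlglot
from sqlglot import exp

tree = sqlglot.parse_one("SELECT a::INT, b -> 'x' FROM t", read="postgres")
print(len(list(tree.find_all(exp.Cast))))         # a::INT parses to exp.Cast
print(len(list(tree.find_all(exp.JSONExtract))))  # b -> 'x' parses to exp.JSONExtract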
      + 468    EXPRESSION_PARSERS = {
      + 469        exp.Cluster: lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"),
      + 470        exp.Column: lambda self: self._parse_column(),
      + 471        exp.Condition: lambda self: self._parse_conjunction(),
      + 472        exp.DataType: lambda self: self._parse_types(),
      + 473        exp.Expression: lambda self: self._parse_statement(),
      + 474        exp.From: lambda self: self._parse_from(),
      + 475        exp.Group: lambda self: self._parse_group(),
      + 476        exp.Having: lambda self: self._parse_having(),
      + 477        exp.Identifier: lambda self: self._parse_id_var(),
      + 478        exp.Join: lambda self: self._parse_join(),
      + 479        exp.Lambda: lambda self: self._parse_lambda(),
      + 480        exp.Lateral: lambda self: self._parse_lateral(),
      + 481        exp.Limit: lambda self: self._parse_limit(),
      + 482        exp.Offset: lambda self: self._parse_offset(),
      + 483        exp.Order: lambda self: self._parse_order(),
      + 484        exp.Ordered: lambda self: self._parse_ordered(),
      + 485        exp.Properties: lambda self: self._parse_properties(),
      + 486        exp.Qualify: lambda self: self._parse_qualify(),
      + 487        exp.Returning: lambda self: self._parse_returning(),
      + 488        exp.Sort: lambda self: self._parse_sort(exp.Sort, "SORT", "BY"),
      + 489        exp.Table: lambda self: self._parse_table_parts(),
      + 490        exp.TableAlias: lambda self: self._parse_table_alias(),
      + 491        exp.Where: lambda self: self._parse_where(),
      + 492        exp.Window: lambda self: self._parse_named_window(),
      + 493        exp.With: lambda self: self._parse_with(),
      + 494        "JOIN_TYPE": lambda self: self._parse_join_parts(),
      + 495    }
      + 496
      + 497    STATEMENT_PARSERS = {
      + 498        TokenType.ALTER: lambda self: self._parse_alter(),
      + 499        TokenType.BEGIN: lambda self: self._parse_transaction(),
      + 500        TokenType.CACHE: lambda self: self._parse_cache(),
      + 501        TokenType.COMMIT: lambda self: self._parse_commit_or_rollback(),
      + 502        TokenType.COMMENT: lambda self: self._parse_comment(),
      + 503        TokenType.CREATE: lambda self: self._parse_create(),
      + 504        TokenType.DELETE: lambda self: self._parse_delete(),
      + 505        TokenType.DESC: lambda self: self._parse_describe(),
      + 506        TokenType.DESCRIBE: lambda self: self._parse_describe(),
      + 507        TokenType.DROP: lambda self: self._parse_drop(),
      + 508        TokenType.END: lambda self: self._parse_commit_or_rollback(),
      + 509        TokenType.FROM: lambda self: exp.select("*").from_(
      + 510            t.cast(exp.From, self._parse_from(skip_from_token=True))
      + 511        ),
      + 512        TokenType.INSERT: lambda self: self._parse_insert(),
      + 513        TokenType.LOAD: lambda self: self._parse_load(),
      + 514        TokenType.MERGE: lambda self: self._parse_merge(),
      + 515        TokenType.PIVOT: lambda self: self._parse_simplified_pivot(),
      + 516        TokenType.PRAGMA: lambda self: self.expression(exp.Pragma, this=self._parse_expression()),
      + 517        TokenType.ROLLBACK: lambda self: self._parse_commit_or_rollback(),
      + 518        TokenType.SET: lambda self: self._parse_set(),
      + 519        TokenType.UNCACHE: lambda self: self._parse_uncache(),
      + 520        TokenType.UPDATE: lambda self: self._parse_update(),
      + 521        TokenType.USE: lambda self: self.expression(
      + 522            exp.Use,
      + 523            kind=self._match_texts(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"))
      + 524            and exp.var(self._prev.text),
      + 525            this=self._parse_table(schema=False),
      + 526        ),
      + 527    }
      + 528
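STATEMENT_PARSERS is a dispatch table keyed on the first significant token of each statement; tokens without an entry fall through to the generic expression/command path. Two hedged examples (my own inputs) exercising entries visible above:

import sqlglot

# TokenType.PRAGMA dispatches to an exp.Pragma node.
print(type(sqlglot.parse_one("PRAGMA table_info(t)")).__name__)

# TokenType.FROM wraps the FROM clause in an implicit SELECT *.
print(sqlglot.parse_one("FROM t").sql())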
      + 529    UNARY_PARSERS = {
      + 530        TokenType.PLUS: lambda self: self._parse_unary(),  # Unary + is handled as a no-op
      + 531        TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()),
      + 532        TokenType.TILDA: lambda self: self.expression(exp.BitwiseNot, this=self._parse_unary()),
      + 533        TokenType.DASH: lambda self: self.expression(exp.Neg, this=self._parse_unary()),
      + 534    }
      + 535
      + 536    PRIMARY_PARSERS = {
      + 537        TokenType.STRING: lambda self, token: self.expression(
      + 538            exp.Literal, this=token.text, is_string=True
      + 539        ),
      + 540        TokenType.NUMBER: lambda self, token: self.expression(
      + 541            exp.Literal, this=token.text, is_string=False
      + 542        ),
      + 543        TokenType.STAR: lambda self, _: self.expression(
      + 544            exp.Star,
      + 545            **{"except": self._parse_except(), "replace": self._parse_replace()},
      + 546        ),
      + 547        TokenType.NULL: lambda self, _: self.expression(exp.Null),
      + 548        TokenType.TRUE: lambda self, _: self.expression(exp.Boolean, this=True),
      + 549        TokenType.FALSE: lambda self, _: self.expression(exp.Boolean, this=False),
      + 550        TokenType.BIT_STRING: lambda self, token: self.expression(exp.BitString, this=token.text),
      + 551        TokenType.HEX_STRING: lambda self, token: self.expression(exp.HexString, this=token.text),
      + 552        TokenType.BYTE_STRING: lambda self, token: self.expression(exp.ByteString, this=token.text),
      + 553        TokenType.INTRODUCER: lambda self, token: self._parse_introducer(token),
      + 554        TokenType.NATIONAL_STRING: lambda self, token: self.expression(
      + 555            exp.National, this=token.text
      + 556        ),
      + 557        TokenType.RAW_STRING: lambda self, token: self.expression(exp.RawString, this=token.text),
      + 558        TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
      + 559    }
      + 560
      + 561    PLACEHOLDER_PARSERS = {
      + 562        TokenType.PLACEHOLDER: lambda self: self.expression(exp.Placeholder),
      + 563        TokenType.PARAMETER: lambda self: self._parse_parameter(),
      + 564        TokenType.COLON: lambda self: self.expression(exp.Placeholder, this=self._prev.text)
      + 565        if self._match_set((TokenType.NUMBER, TokenType.VAR))
      + 566        else None,
      + 567    }
      + 568
      + 569    RANGE_PARSERS = {
      + 570        TokenType.BETWEEN: lambda self, this: self._parse_between(this),
      + 571        TokenType.GLOB: binary_range_parser(exp.Glob),
      + 572        TokenType.ILIKE: binary_range_parser(exp.ILike),
      + 573        TokenType.IN: lambda self, this: self._parse_in(this),
      + 574        TokenType.IRLIKE: binary_range_parser(exp.RegexpILike),
      + 575        TokenType.IS: lambda self, this: self._parse_is(this),
      + 576        TokenType.LIKE: binary_range_parser(exp.Like),
      + 577        TokenType.OVERLAPS: binary_range_parser(exp.Overlaps),
      + 578        TokenType.RLIKE: binary_range_parser(exp.RegexpLike),
      + 579        TokenType.SIMILAR_TO: binary_range_parser(exp.SimilarTo),
      + 580    }
      + 581
      + 582    PROPERTY_PARSERS: t.Dict[str, t.Callable] = {
      + 583        "ALGORITHM": lambda self: self._parse_property_assignment(exp.AlgorithmProperty),
      + 584        "AUTO_INCREMENT": lambda self: self._parse_property_assignment(exp.AutoIncrementProperty),
      + 585        "BLOCKCOMPRESSION": lambda self: self._parse_blockcompression(),
      + 586        "CHARACTER SET": lambda self: self._parse_character_set(),
      + 587        "CHECKSUM": lambda self: self._parse_checksum(),
      + 588        "CLUSTER": lambda self: self._parse_cluster(),
      + 589        "COLLATE": lambda self: self._parse_property_assignment(exp.CollateProperty),
      + 590        "COMMENT": lambda self: self._parse_property_assignment(exp.SchemaCommentProperty),
      + 591        "DATABLOCKSIZE": lambda self, **kwargs: self._parse_datablocksize(**kwargs),
      + 592        "DEFINER": lambda self: self._parse_definer(),
      + 593        "DETERMINISTIC": lambda self: self.expression(
      + 594            exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE")
      + 595        ),
      + 596        "DISTKEY": lambda self: self._parse_distkey(),
      + 597        "DISTSTYLE": lambda self: self._parse_property_assignment(exp.DistStyleProperty),
      + 598        "ENGINE": lambda self: self._parse_property_assignment(exp.EngineProperty),
      + 599        "EXECUTE": lambda self: self._parse_property_assignment(exp.ExecuteAsProperty),
      + 600        "EXTERNAL": lambda self: self.expression(exp.ExternalProperty),
      + 601        "FALLBACK": lambda self, **kwargs: self._parse_fallback(**kwargs),
      + 602        "FORMAT": lambda self: self._parse_property_assignment(exp.FileFormatProperty),
      + 603        "FREESPACE": lambda self: self._parse_freespace(),
      + 604        "IMMUTABLE": lambda self: self.expression(
      + 605            exp.StabilityProperty, this=exp.Literal.string("IMMUTABLE")
      + 606        ),
      + 607        "JOURNAL": lambda self, **kwargs: self._parse_journal(**kwargs),
      + 608        "LANGUAGE": lambda self: self._parse_property_assignment(exp.LanguageProperty),
      + 609        "LAYOUT": lambda self: self._parse_dict_property(this="LAYOUT"),
      + 610        "LIFETIME": lambda self: self._parse_dict_range(this="LIFETIME"),
      + 611        "LIKE": lambda self: self._parse_create_like(),
      + 612        "LOCATION": lambda self: self._parse_property_assignment(exp.LocationProperty),
      + 613        "LOCK": lambda self: self._parse_locking(),
      + 614        "LOCKING": lambda self: self._parse_locking(),
      + 615        "LOG": lambda self, **kwargs: self._parse_log(**kwargs),
      + 616        "MATERIALIZED": lambda self: self.expression(exp.MaterializedProperty),
      + 617        "MERGEBLOCKRATIO": lambda self, **kwargs: self._parse_mergeblockratio(**kwargs),
      + 618        "MULTISET": lambda self: self.expression(exp.SetProperty, multi=True),
      + 619        "NO": lambda self: self._parse_no_property(),
      + 620        "ON": lambda self: self._parse_on_property(),
      + 621        "ORDER BY": lambda self: self._parse_order(skip_order_token=True),
      + 622        "PARTITION BY": lambda self: self._parse_partitioned_by(),
      + 623        "PARTITIONED BY": lambda self: self._parse_partitioned_by(),
      + 624        "PARTITIONED_BY": lambda self: self._parse_partitioned_by(),
      + 625        "PRIMARY KEY": lambda self: self._parse_primary_key(in_props=True),
      + 626        "RANGE": lambda self: self._parse_dict_range(this="RANGE"),
      + 627        "RETURNS": lambda self: self._parse_returns(),
      + 628        "ROW": lambda self: self._parse_row(),
      + 629        "ROW_FORMAT": lambda self: self._parse_property_assignment(exp.RowFormatProperty),
      + 630        "SET": lambda self: self.expression(exp.SetProperty, multi=False),
      + 631        "SETTINGS": lambda self: self.expression(
      + 632            exp.SettingsProperty, expressions=self._parse_csv(self._parse_set_item)
        633        ),
      - 634        "STORED": lambda self: self._parse_stored(),
      - 635        "TBLPROPERTIES": lambda self: self._parse_wrapped_csv(self._parse_property),
      - 636        "TEMP": lambda self: self.expression(exp.TemporaryProperty),
      - 637        "TEMPORARY": lambda self: self.expression(exp.TemporaryProperty),
      - 638        "TRANSIENT": lambda self: self.expression(exp.TransientProperty),
      - 639        "TTL": lambda self: self._parse_ttl(),
      - 640        "USING": lambda self: self._parse_property_assignment(exp.FileFormatProperty),
      - 641        "VOLATILE": lambda self: self._parse_volatile_property(),
      - 642        "WITH": lambda self: self._parse_with_property(),
      - 643    }
      - 644
      - 645    CONSTRAINT_PARSERS = {
      - 646        "AUTOINCREMENT": lambda self: self._parse_auto_increment(),
      - 647        "AUTO_INCREMENT": lambda self: self._parse_auto_increment(),
      - 648        "CASESPECIFIC": lambda self: self.expression(exp.CaseSpecificColumnConstraint, not_=False),
      - 649        "CHARACTER SET": lambda self: self.expression(
      - 650            exp.CharacterSetColumnConstraint, this=self._parse_var_or_string()
      - 651        ),
      - 652        "CHECK": lambda self: self.expression(
      - 653            exp.CheckColumnConstraint, this=self._parse_wrapped(self._parse_conjunction)
      - 654        ),
      - 655        "COLLATE": lambda self: self.expression(
      - 656            exp.CollateColumnConstraint, this=self._parse_var()
      + 634        "SORTKEY": lambda self: self._parse_sortkey(),
      + 635        "SOURCE": lambda self: self._parse_dict_property(this="SOURCE"),
      + 636        "STABLE": lambda self: self.expression(
      + 637            exp.StabilityProperty, this=exp.Literal.string("STABLE")
      + 638        ),
      + 639        "STORED": lambda self: self._parse_stored(),
      + 640        "TBLPROPERTIES": lambda self: self._parse_wrapped_csv(self._parse_property),
      + 641        "TEMP": lambda self: self.expression(exp.TemporaryProperty),
      + 642        "TEMPORARY": lambda self: self.expression(exp.TemporaryProperty),
      + 643        "TO": lambda self: self._parse_to_table(),
      + 644        "TRANSIENT": lambda self: self.expression(exp.TransientProperty),
      + 645        "TTL": lambda self: self._parse_ttl(),
      + 646        "USING": lambda self: self._parse_property_assignment(exp.FileFormatProperty),
      + 647        "VOLATILE": lambda self: self._parse_volatile_property(),
      + 648        "WITH": lambda self: self._parse_with_property(),
      + 649    }
      + 650
      + 651    CONSTRAINT_PARSERS = {
      + 652        "AUTOINCREMENT": lambda self: self._parse_auto_increment(),
      + 653        "AUTO_INCREMENT": lambda self: self._parse_auto_increment(),
      + 654        "CASESPECIFIC": lambda self: self.expression(exp.CaseSpecificColumnConstraint, not_=False),
      + 655        "CHARACTER SET": lambda self: self.expression(
      + 656            exp.CharacterSetColumnConstraint, this=self._parse_var_or_string()
        657        ),
      - 658        "COMMENT": lambda self: self.expression(
      - 659            exp.CommentColumnConstraint, this=self._parse_string()
      + 658        "CHECK": lambda self: self.expression(
      + 659            exp.CheckColumnConstraint, this=self._parse_wrapped(self._parse_conjunction)
        660        ),
      - 661        "COMPRESS": lambda self: self._parse_compress(),
      - 662        "DEFAULT": lambda self: self.expression(
      - 663            exp.DefaultColumnConstraint, this=self._parse_bitwise()
      - 664        ),
      - 665        "ENCODE": lambda self: self.expression(exp.EncodeColumnConstraint, this=self._parse_var()),
      - 666        "FOREIGN KEY": lambda self: self._parse_foreign_key(),
      - 667        "FORMAT": lambda self: self.expression(
      - 668            exp.DateFormatColumnConstraint, this=self._parse_var_or_string()
      - 669        ),
      - 670        "GENERATED": lambda self: self._parse_generated_as_identity(),
      - 671        "IDENTITY": lambda self: self._parse_auto_increment(),
      - 672        "INLINE": lambda self: self._parse_inline(),
      - 673        "LIKE": lambda self: self._parse_create_like(),
      - 674        "NOT": lambda self: self._parse_not_constraint(),
      - 675        "NULL": lambda self: self.expression(exp.NotNullColumnConstraint, allow_null=True),
      - 676        "ON": lambda self: self._match(TokenType.UPDATE)
      - 677        and self.expression(exp.OnUpdateColumnConstraint, this=self._parse_function()),
      - 678        "PATH": lambda self: self.expression(exp.PathColumnConstraint, this=self._parse_string()),
      - 679        "PRIMARY KEY": lambda self: self._parse_primary_key(),
      - 680        "REFERENCES": lambda self: self._parse_references(match=False),
      - 681        "TITLE": lambda self: self.expression(
      - 682            exp.TitleColumnConstraint, this=self._parse_var_or_string()
      - 683        ),
      - 684        "TTL": lambda self: self.expression(exp.MergeTreeTTL, expressions=[self._parse_bitwise()]),
      - 685        "UNIQUE": lambda self: self._parse_unique(),
      - 686        "UPPERCASE": lambda self: self.expression(exp.UppercaseColumnConstraint),
      - 687    }
      - 688
      - 689    ALTER_PARSERS = {
      - 690        "ADD": lambda self: self._parse_alter_table_add(),
      - 691        "ALTER": lambda self: self._parse_alter_table_alter(),
      - 692        "DELETE": lambda self: self.expression(exp.Delete, where=self._parse_where()),
      - 693        "DROP": lambda self: self._parse_alter_table_drop(),
      - 694        "RENAME": lambda self: self._parse_alter_table_rename(),
      - 695    }
      - 696
      - 697    SCHEMA_UNNAMED_CONSTRAINTS = {"CHECK", "FOREIGN KEY", "LIKE", "PRIMARY KEY", "UNIQUE"}
      - 698
      - 699    NO_PAREN_FUNCTION_PARSERS = {
      - 700        TokenType.ANY: lambda self: self.expression(exp.Any, this=self._parse_bitwise()),
      - 701        TokenType.CASE: lambda self: self._parse_case(),
      - 702        TokenType.IF: lambda self: self._parse_if(),
      - 703        TokenType.NEXT_VALUE_FOR: lambda self: self.expression(
      - 704            exp.NextValueFor,
      - 705            this=self._parse_column(),
      - 706            order=self._match(TokenType.OVER) and self._parse_wrapped(self._parse_order),
      - 707        ),
      - 708    }
      - 709
      - 710    FUNCTIONS_WITH_ALIASED_ARGS = {"STRUCT"}
      - 711
      - 712    FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
      - 713        "CAST": lambda self: self._parse_cast(self.STRICT_CAST),
      - 714        "CONVERT": lambda self: self._parse_convert(self.STRICT_CAST),
      - 715        "DECODE": lambda self: self._parse_decode(),
      - 716        "EXTRACT": lambda self: self._parse_extract(),
      - 717        "JSON_OBJECT": lambda self: self._parse_json_object(),
      - 718        "LOG": lambda self: self._parse_logarithm(),
      - 719        "MATCH": lambda self: self._parse_match_against(),
      - 720        "OPENJSON": lambda self: self._parse_open_json(),
      - 721        "POSITION": lambda self: self._parse_position(),
      - 722        "SAFE_CAST": lambda self: self._parse_cast(False),
      - 723        "STRING_AGG": lambda self: self._parse_string_agg(),
      - 724        "SUBSTRING": lambda self: self._parse_substring(),
      - 725        "TRIM": lambda self: self._parse_trim(),
      - 726        "TRY_CAST": lambda self: self._parse_cast(False),
      - 727        "TRY_CONVERT": lambda self: self._parse_convert(False),
      - 728    }
      - 729
      - 730    QUERY_MODIFIER_PARSERS = {
      - 731        "joins": lambda self: list(iter(self._parse_join, None)),
      - 732        "laterals": lambda self: list(iter(self._parse_lateral, None)),
      - 733        "match": lambda self: self._parse_match_recognize(),
      - 734        "where": lambda self: self._parse_where(),
      - 735        "group": lambda self: self._parse_group(),
      - 736        "having": lambda self: self._parse_having(),
      - 737        "qualify": lambda self: self._parse_qualify(),
      - 738        "windows": lambda self: self._parse_window_clause(),
      - 739        "order": lambda self: self._parse_order(),
      - 740        "limit": lambda self: self._parse_limit(),
      - 741        "offset": lambda self: self._parse_offset(),
      - 742        "locks": lambda self: self._parse_locks(),
      - 743        "sample": lambda self: self._parse_table_sample(as_modifier=True),
      - 744    }
      - 745
      - 746    SET_PARSERS = {
      - 747        "GLOBAL": lambda self: self._parse_set_item_assignment("GLOBAL"),
      - 748        "LOCAL": lambda self: self._parse_set_item_assignment("LOCAL"),
      - 749        "SESSION": lambda self: self._parse_set_item_assignment("SESSION"),
      - 750        "TRANSACTION": lambda self: self._parse_set_transaction(),
      + 661        "COLLATE": lambda self: self.expression(
      + 662            exp.CollateColumnConstraint, this=self._parse_var()
      + 663        ),
      + 664        "COMMENT": lambda self: self.expression(
      + 665            exp.CommentColumnConstraint, this=self._parse_string()
      + 666        ),
      + 667        "COMPRESS": lambda self: self._parse_compress(),
      + 668        "DEFAULT": lambda self: self.expression(
      + 669            exp.DefaultColumnConstraint, this=self._parse_bitwise()
      + 670        ),
      + 671        "ENCODE": lambda self: self.expression(exp.EncodeColumnConstraint, this=self._parse_var()),
      + 672        "FOREIGN KEY": lambda self: self._parse_foreign_key(),
      + 673        "FORMAT": lambda self: self.expression(
      + 674            exp.DateFormatColumnConstraint, this=self._parse_var_or_string()
      + 675        ),
      + 676        "GENERATED": lambda self: self._parse_generated_as_identity(),
      + 677        "IDENTITY": lambda self: self._parse_auto_increment(),
      + 678        "INLINE": lambda self: self._parse_inline(),
      + 679        "LIKE": lambda self: self._parse_create_like(),
      + 680        "NOT": lambda self: self._parse_not_constraint(),
      + 681        "NULL": lambda self: self.expression(exp.NotNullColumnConstraint, allow_null=True),
      + 682        "ON": lambda self: self._match(TokenType.UPDATE)
      + 683        and self.expression(exp.OnUpdateColumnConstraint, this=self._parse_function()),
      + 684        "PATH": lambda self: self.expression(exp.PathColumnConstraint, this=self._parse_string()),
      + 685        "PRIMARY KEY": lambda self: self._parse_primary_key(),
      + 686        "REFERENCES": lambda self: self._parse_references(match=False),
      + 687        "TITLE": lambda self: self.expression(
      + 688            exp.TitleColumnConstraint, this=self._parse_var_or_string()
      + 689        ),
      + 690        "TTL": lambda self: self.expression(exp.MergeTreeTTL, expressions=[self._parse_bitwise()]),
      + 691        "UNIQUE": lambda self: self._parse_unique(),
      + 692        "UPPERCASE": lambda self: self.expression(exp.UppercaseColumnConstraint),
      + 693    }
      + 694
      + 695    ALTER_PARSERS = {
      + 696        "ADD": lambda self: self._parse_alter_table_add(),
      + 697        "ALTER": lambda self: self._parse_alter_table_alter(),
      + 698        "DELETE": lambda self: self.expression(exp.Delete, where=self._parse_where()),
      + 699        "DROP": lambda self: self._parse_alter_table_drop(),
      + 700        "RENAME": lambda self: self._parse_alter_table_rename(),
      + 701    }
      + 702
      + 703    SCHEMA_UNNAMED_CONSTRAINTS = {"CHECK", "FOREIGN KEY", "LIKE", "PRIMARY KEY", "UNIQUE"}
      + 704
      + 705    NO_PAREN_FUNCTION_PARSERS = {
      + 706        TokenType.ANY: lambda self: self.expression(exp.Any, this=self._parse_bitwise()),
      + 707        TokenType.CASE: lambda self: self._parse_case(),
      + 708        TokenType.IF: lambda self: self._parse_if(),
      + 709        TokenType.NEXT_VALUE_FOR: lambda self: self.expression(
      + 710            exp.NextValueFor,
      + 711            this=self._parse_column(),
      + 712            order=self._match(TokenType.OVER) and self._parse_wrapped(self._parse_order),
      + 713        ),
      + 714    }
      + 715
      + 716    FUNCTIONS_WITH_ALIASED_ARGS = {"STRUCT"}
      + 717
      + 718    FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
      + 719        "CAST": lambda self: self._parse_cast(self.STRICT_CAST),
      + 720        "CONCAT": lambda self: self._parse_concat(),
      + 721        "CONVERT": lambda self: self._parse_convert(self.STRICT_CAST),
      + 722        "DECODE": lambda self: self._parse_decode(),
      + 723        "EXTRACT": lambda self: self._parse_extract(),
      + 724        "JSON_OBJECT": lambda self: self._parse_json_object(),
      + 725        "LOG": lambda self: self._parse_logarithm(),
      + 726        "MATCH": lambda self: self._parse_match_against(),
      + 727        "OPENJSON": lambda self: self._parse_open_json(),
      + 728        "POSITION": lambda self: self._parse_position(),
      + 729        "SAFE_CAST": lambda self: self._parse_cast(False),
      + 730        "STRING_AGG": lambda self: self._parse_string_agg(),
      + 731        "SUBSTRING": lambda self: self._parse_substring(),
      + 732        "TRIM": lambda self: self._parse_trim(),
      + 733        "TRY_CAST": lambda self: self._parse_cast(False),
      + 734        "TRY_CONVERT": lambda self: self._parse_convert(False),
      + 735    }
      + 736
      + 737    QUERY_MODIFIER_PARSERS = {
      + 738        "joins": lambda self: list(iter(self._parse_join, None)),
      + 739        "laterals": lambda self: list(iter(self._parse_lateral, None)),
      + 740        "match": lambda self: self._parse_match_recognize(),
      + 741        "where": lambda self: self._parse_where(),
      + 742        "group": lambda self: self._parse_group(),
      + 743        "having": lambda self: self._parse_having(),
      + 744        "qualify": lambda self: self._parse_qualify(),
      + 745        "windows": lambda self: self._parse_window_clause(),
      + 746        "order": lambda self: self._parse_order(),
      + 747        "limit": lambda self: self._parse_limit(),
      + 748        "offset": lambda self: self._parse_offset(),
      + 749        "locks": lambda self: self._parse_locks(),
      + 750        "sample": lambda self: self._parse_table_sample(as_modifier=True),
        751    }
        752
      - 753    SHOW_PARSERS: t.Dict[str, t.Callable] = {}
      - 754
      - 755    TYPE_LITERAL_PARSERS: t.Dict[exp.DataType.Type, t.Callable] = {}
      - 756
      - 757    MODIFIABLES = (exp.Subquery, exp.Subqueryable, exp.Table)
      - 758
      - 759    TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"}
      - 760
      - 761    TRANSACTION_CHARACTERISTICS = {
      - 762        "ISOLATION LEVEL REPEATABLE READ",
      - 763        "ISOLATION LEVEL READ COMMITTED",
      - 764        "ISOLATION LEVEL READ UNCOMMITTED",
      - 765        "ISOLATION LEVEL SERIALIZABLE",
      - 766        "READ WRITE",
      - 767        "READ ONLY",
      - 768    }
      + 753    SET_PARSERS = {
      + 754        "GLOBAL": lambda self: self._parse_set_item_assignment("GLOBAL"),
      + 755        "LOCAL": lambda self: self._parse_set_item_assignment("LOCAL"),
      + 756        "SESSION": lambda self: self._parse_set_item_assignment("SESSION"),
      + 757        "TRANSACTION": lambda self: self._parse_set_transaction(),
      + 758    }
      + 759
      + 760    SHOW_PARSERS: t.Dict[str, t.Callable] = {}
      + 761
      + 762    TYPE_LITERAL_PARSERS: t.Dict[exp.DataType.Type, t.Callable] = {}
      + 763
      + 764    MODIFIABLES = (exp.Subquery, exp.Subqueryable, exp.Table)
      + 765
      + 766    DDL_SELECT_TOKENS = {TokenType.SELECT, TokenType.WITH, TokenType.L_PAREN}
      + 767
      + 768    PRE_VOLATILE_TOKENS = {TokenType.CREATE, TokenType.REPLACE, TokenType.UNIQUE}
        769
      - 770    INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"}
      - 771
      - 772    CLONE_KINDS = {"TIMESTAMP", "OFFSET", "STATEMENT"}
      - 773
      - 774    WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS}
      - 775    WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER}
      - 776    WINDOW_SIDES = {"FOLLOWING", "PRECEDING"}
      - 777
      - 778    ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY}
      + 770    TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"}
      + 771    TRANSACTION_CHARACTERISTICS = {
      + 772        "ISOLATION LEVEL REPEATABLE READ",
      + 773        "ISOLATION LEVEL READ COMMITTED",
      + 774        "ISOLATION LEVEL READ UNCOMMITTED",
      + 775        "ISOLATION LEVEL SERIALIZABLE",
      + 776        "READ WRITE",
      + 777        "READ ONLY",
      + 778    }
        779
      - 780    STRICT_CAST = True
      + 780    INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"}
        781
      - 782    CONVERT_TYPE_FIRST = False
      + 782    CLONE_KINDS = {"TIMESTAMP", "OFFSET", "STATEMENT"}
        783
      - 784    PREFIXED_PIVOT_COLUMNS = False
      - 785    IDENTIFY_PIVOT_STRINGS = False
      - 786
      - 787    LOG_BASE_FIRST = True
      - 788    LOG_DEFAULTS_TO_LN = False
      + 784    WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS}
      + 785    WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER}
      + 786    WINDOW_SIDES = {"FOLLOWING", "PRECEDING"}
      + 787
      + 788    ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY}
        789
      - 790    __slots__ = (
      - 791        "error_level",
      - 792        "error_message_context",
      - 793        "sql",
      - 794        "errors",
      - 795        "index_offset",
      - 796        "unnest_column_only",
      - 797        "alias_post_tablesample",
      - 798        "max_errors",
      - 799        "null_ordering",
      - 800        "_tokens",
      - 801        "_index",
      - 802        "_curr",
      - 803        "_next",
      - 804        "_prev",
      - 805        "_prev_comments",
      - 806        "_show_trie",
      - 807        "_set_trie",
      - 808    )
      - 809
      - 810    def __init__(
      - 811        self,
      - 812        error_level: t.Optional[ErrorLevel] = None,
      - 813        error_message_context: int = 100,
      - 814        index_offset: int = 0,
      - 815        unnest_column_only: bool = False,
      - 816        alias_post_tablesample: bool = False,
      - 817        max_errors: int = 3,
      - 818        null_ordering: t.Optional[str] = None,
      - 819    ):
      - 820        self.error_level = error_level or ErrorLevel.IMMEDIATE
      - 821        self.error_message_context = error_message_context
      - 822        self.index_offset = index_offset
      - 823        self.unnest_column_only = unnest_column_only
      - 824        self.alias_post_tablesample = alias_post_tablesample
      - 825        self.max_errors = max_errors
      - 826        self.null_ordering = null_ordering
      - 827        self.reset()
      + 790    STRICT_CAST = True
      + 791
      + 792    CONCAT_NULL_OUTPUTS_STRING = False  # A NULL arg in CONCAT yields NULL by default
      + 793
      + 794    CONVERT_TYPE_FIRST = False
      + 795
      + 796    PREFIXED_PIVOT_COLUMNS = False
      + 797    IDENTIFY_PIVOT_STRINGS = False
      + 798
      + 799    LOG_BASE_FIRST = True
      + 800    LOG_DEFAULTS_TO_LN = False
      + 801
      + 802    __slots__ = (
      + 803        "error_level",
      + 804        "error_message_context",
      + 805        "max_errors",
      + 806        "sql",
      + 807        "errors",
      + 808        "_tokens",
      + 809        "_index",
      + 810        "_curr",
      + 811        "_next",
      + 812        "_prev",
      + 813        "_prev_comments",
      + 814    )
      + 815
      + 816    # Autofilled
      + 817    INDEX_OFFSET: int = 0
      + 818    UNNEST_COLUMN_ONLY: bool = False
      + 819    ALIAS_POST_TABLESAMPLE: bool = False
      + 820    STRICT_STRING_CONCAT = False
      + 821    NULL_ORDERING: str = "nulls_are_small"
      + 822    SHOW_TRIE: t.Dict = {}
      + 823    SET_TRIE: t.Dict = {}
      + 824    FORMAT_MAPPING: t.Dict[str, str] = {}
      + 825    FORMAT_TRIE: t.Dict = {}
      + 826    TIME_MAPPING: t.Dict[str, str] = {}
      + 827    TIME_TRIE: t.Dict = {}
        828
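The removed constructor options (index_offset, unnest_column_only, alias_post_tablesample, null_ordering) become the upper-case class attributes marked "Autofilled" above, which the dialect layer now populates on the parser class. A hedged sketch of what a dialect-specific override looks like under this scheme; the subclass name and values are illustrative only:

from sqlglot.parser import Parser

class MyDialectParser(Parser):  # hypothetical subclass, for illustration
    # Class-level settings replace the removed per-instance constructor arguments.
    ALIAS_POST_TABLESAMPLE = True
    NULL_ORDERING = "nulls_are_large"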
      - 829    def reset(self):
      - 830        self.sql = ""
      - 831        self.errors = []
      - 832        self._tokens = []
      - 833        self._index = 0
      - 834        self._curr = None
      - 835        self._next = None
      - 836        self._prev = None
      - 837        self._prev_comments = None
      - 838
      - 839    def parse(
      - 840        self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
      - 841    ) -> t.List[t.Optional[exp.Expression]]:
      - 842        """
      - 843        Parses a list of tokens and returns a list of syntax trees, one tree
      - 844        per parsed SQL statement.
      - 845
      - 846        Args:
      - 847            raw_tokens: the list of tokens.
      - 848            sql: the original SQL string, used to produce helpful debug messages.
      + 829    def __init__(
      + 830        self,
      + 831        error_level: t.Optional[ErrorLevel] = None,
      + 832        error_message_context: int = 100,
      + 833        max_errors: int = 3,
      + 834    ):
      + 835        self.error_level = error_level or ErrorLevel.IMMEDIATE
      + 836        self.error_message_context = error_message_context
      + 837        self.max_errors = max_errors
      + 838        self.reset()
      + 839
      + 840    def reset(self):
      + 841        self.sql = ""
      + 842        self.errors = []
      + 843        self._tokens = []
      + 844        self._index = 0
      + 845        self._curr = None
      + 846        self._next = None
      + 847        self._prev = None
      + 848        self._prev_comments = None
        849
      - 850        Returns:
      - 851            The list of syntax trees.
      - 852        """
      - 853        return self._parse(
      - 854            parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
      - 855        )
      + 850    def parse(
      + 851        self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
      + 852    ) -> t.List[t.Optional[exp.Expression]]:
      + 853        """
      + 854        Parses a list of tokens and returns a list of syntax trees, one tree
      + 855        per parsed SQL statement.
        856
      - 857    def parse_into(
      - 858        self,
      - 859        expression_types: exp.IntoType,
      - 860        raw_tokens: t.List[Token],
      - 861        sql: t.Optional[str] = None,
      - 862    ) -> t.List[t.Optional[exp.Expression]]:
      - 863        """
      - 864        Parses a list of tokens into a given Expression type. If a collection of Expression
      - 865        types is given instead, this method will try to parse the token list into each one
      - 866        of them, stopping at the first for which the parsing succeeds.
      + 857        Args:
      + 858            raw_tokens: The list of tokens.
      + 859            sql: The original SQL string, used to produce helpful debug messages.
      + 860
      + 861        Returns:
      + 862            The list of produced syntax trees.
      + 863        """
      + 864        return self._parse(
      + 865            parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
      + 866        )
        867
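A minimal usage sketch of the constructor and `parse` shown above, assuming the `Tokenizer` from `sqlglot.tokens` and the `ErrorLevel` from `sqlglot.errors` that this module already relies on:

    from sqlglot import exp
    from sqlglot.errors import ErrorLevel
    from sqlglot.parser import Parser
    from sqlglot.tokens import Tokenizer

    sql = "SELECT 1; SELECT 2"
    tokens = Tokenizer().tokenize(sql)

    # One syntax tree per statement; `sql` is only used for error context.
    parser = Parser(error_level=ErrorLevel.RAISE)
    expressions = parser.parse(tokens, sql)
    assert all(isinstance(e, exp.Expression) for e in expressions)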
      - 868        Args:
      - 869            expression_types: the expression type(s) to try and parse the token list into.
      - 870            raw_tokens: the list of tokens.
      - 871            sql: the original SQL string, used to produce helpful debug messages.
      - 872
      - 873        Returns:
      - 874            The target Expression.
      - 875        """
      - 876        errors = []
      - 877        for expression_type in ensure_collection(expression_types):
      - 878            parser = self.EXPRESSION_PARSERS.get(expression_type)
      - 879            if not parser:
      - 880                raise TypeError(f"No parser registered for {expression_type}")
      - 881            try:
      - 882                return self._parse(parser, raw_tokens, sql)
      - 883            except ParseError as e:
      - 884                e.errors[0]["into_expression"] = expression_type
      - 885                errors.append(e)
      - 886        raise ParseError(
      - 887            f"Failed to parse '{sql or raw_tokens}' into {expression_types}",
      - 888            errors=merge_errors(errors),
      - 889        ) from errors[-1]
      - 890
      - 891    def _parse(
      - 892        self,
      - 893        parse_method: t.Callable[[Parser], t.Optional[exp.Expression]],
      - 894        raw_tokens: t.List[Token],
      - 895        sql: t.Optional[str] = None,
      - 896    ) -> t.List[t.Optional[exp.Expression]]:
      - 897        self.reset()
      - 898        self.sql = sql or ""
      - 899        total = len(raw_tokens)
      - 900        chunks: t.List[t.List[Token]] = [[]]
      - 901
      - 902        for i, token in enumerate(raw_tokens):
      - 903            if token.token_type == TokenType.SEMICOLON:
      - 904                if i < total - 1:
      - 905                    chunks.append([])
      - 906            else:
      - 907                chunks[-1].append(token)
      - 908
      - 909        expressions = []
      - 910
      - 911        for tokens in chunks:
      - 912            self._index = -1
      - 913            self._tokens = tokens
      - 914            self._advance()
      + 868    def parse_into(
      + 869        self,
      + 870        expression_types: exp.IntoType,
      + 871        raw_tokens: t.List[Token],
      + 872        sql: t.Optional[str] = None,
      + 873    ) -> t.List[t.Optional[exp.Expression]]:
      + 874        """
      + 875        Parses a list of tokens into a given Expression type. If a collection of Expression
      + 876        types is given instead, this method will try to parse the token list into each one
      + 877        of them, stopping at the first for which the parsing succeeds.
      + 878
      + 879        Args:
      + 880            expression_types: The expression type(s) to try and parse the token list into.
      + 881            raw_tokens: The list of tokens.
      + 882            sql: The original SQL string, used to produce helpful debug messages.
      + 883
      + 884        Returns:
      + 885            The target Expression.
      + 886        """
      + 887        errors = []
      + 888        for expression_type in ensure_list(expression_types):
      + 889            parser = self.EXPRESSION_PARSERS.get(expression_type)
      + 890            if not parser:
      + 891                raise TypeError(f"No parser registered for {expression_type}")
      + 892
      + 893            try:
      + 894                return self._parse(parser, raw_tokens, sql)
      + 895            except ParseError as e:
      + 896                e.errors[0]["into_expression"] = expression_type
      + 897                errors.append(e)
      + 898
      + 899        raise ParseError(
      + 900            f"Failed to parse '{sql or raw_tokens}' into {expression_types}",
      + 901            errors=merge_errors(errors),
      + 902        ) from errors[-1]
      + 903
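Continuing the sketch above, `parse_into` dispatches through `EXPRESSION_PARSERS`; the choice of `exp.Condition` here is an assumption about which types are registered (an unregistered type raises `TypeError`, and a parse that fails for every candidate raises `ParseError`):

    tokens = Tokenizer().tokenize("x > 1 AND y < 2")

    # Assumes exp.Condition has an entry in EXPRESSION_PARSERS.
    condition = Parser().parse_into(exp.Condition, tokens, "x > 1 AND y < 2")[0]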
      + 904    def _parse(
      + 905        self,
      + 906        parse_method: t.Callable[[Parser], t.Optional[exp.Expression]],
      + 907        raw_tokens: t.List[Token],
      + 908        sql: t.Optional[str] = None,
      + 909    ) -> t.List[t.Optional[exp.Expression]]:
      + 910        self.reset()
      + 911        self.sql = sql or ""
      + 912
      + 913        total = len(raw_tokens)
      + 914        chunks: t.List[t.List[Token]] = [[]]
        915
      - 916            expressions.append(parse_method(self))
      - 917
      - 918            if self._index < len(self._tokens):
      - 919                self.raise_error("Invalid expression / Unexpected token")
      - 920
      - 921            self.check_errors()
      + 916        for i, token in enumerate(raw_tokens):
      + 917            if token.token_type == TokenType.SEMICOLON:
      + 918                if i < total - 1:
      + 919                    chunks.append([])
      + 920            else:
      + 921                chunks[-1].append(token)
        922
      - 923        return expressions
      + 923        expressions = []
        924
      - 925    def check_errors(self) -> None:
      - 926        """
      - 927        Logs or raises any found errors, depending on the chosen error level setting.
      - 928        """
      - 929        if self.error_level == ErrorLevel.WARN:
      - 930            for error in self.errors:
      - 931                logger.error(str(error))
      - 932        elif self.error_level == ErrorLevel.RAISE and self.errors:
      - 933            raise ParseError(
      - 934                concat_messages(self.errors, self.max_errors),
      - 935                errors=merge_errors(self.errors),
      - 936            )
      - 937
      - 938    def raise_error(self, message: str, token: t.Optional[Token] = None) -> None:
      - 939        """
      - 940        Appends an error in the list of recorded errors or raises it, depending on the chosen
      - 941        error level setting.
      - 942        """
      - 943        token = token or self._curr or self._prev or Token.string("")
      - 944        start = token.start
      - 945        end = token.end + 1
      - 946        start_context = self.sql[max(start - self.error_message_context, 0) : start]
      - 947        highlight = self.sql[start:end]
      - 948        end_context = self.sql[end : end + self.error_message_context]
      + 925        for tokens in chunks:
      + 926            self._index = -1
      + 927            self._tokens = tokens
      + 928            self._advance()
      + 929
      + 930            expressions.append(parse_method(self))
      + 931
      + 932            if self._index < len(self._tokens):
      + 933                self.raise_error("Invalid expression / Unexpected token")
      + 934
      + 935            self.check_errors()
      + 936
      + 937        return expressions
      + 938
      + 939    def check_errors(self) -> None:
      + 940        """Logs or raises any found errors, depending on the chosen error level setting."""
      + 941        if self.error_level == ErrorLevel.WARN:
      + 942            for error in self.errors:
      + 943                logger.error(str(error))
      + 944        elif self.error_level == ErrorLevel.RAISE and self.errors:
      + 945            raise ParseError(
      + 946                concat_messages(self.errors, self.max_errors),
      + 947                errors=merge_errors(self.errors),
      + 948            )
        949
      - 950        error = ParseError.new(
      - 951            f"{message}. Line {token.line}, Col: {token.col}.\n"
      - 952            f"  {start_context}\033[4m{highlight}\033[0m{end_context}",
      - 953            description=message,
      - 954            line=token.line,
      - 955            col=token.col,
      - 956            start_context=start_context,
      - 957            highlight=highlight,
      - 958            end_context=end_context,
      - 959        )
      - 960
      - 961        if self.error_level == ErrorLevel.IMMEDIATE:
      - 962            raise error
      - 963
      - 964        self.errors.append(error)
      - 965
      - 966    def expression(
      - 967        self, exp_class: t.Type[E], comments: t.Optional[t.List[str]] = None, **kwargs
      - 968    ) -> E:
      - 969        """
      - 970        Creates a new, validated Expression.
      - 971
      - 972        Args:
      - 973            exp_class: the expression class to instantiate.
      - 974            comments: an optional list of comments to attach to the expression.
      - 975            kwargs: the arguments to set for the expression along with their respective values.
      - 976
      - 977        Returns:
      - 978            The target expression.
      - 979        """
      - 980        instance = exp_class(**kwargs)
      - 981        instance.add_comments(comments) if comments else self._add_comments(instance)
      - 982        self.validate_expression(instance)
      - 983        return instance
      - 984
      - 985    def _add_comments(self, expression: t.Optional[exp.Expression]) -> None:
      - 986        if expression and self._prev_comments:
      - 987            expression.add_comments(self._prev_comments)
      - 988            self._prev_comments = None
      - 989
      - 990    def validate_expression(
      - 991        self, expression: exp.Expression, args: t.Optional[t.List] = None
      - 992    ) -> None:
      - 993        """
      - 994        Validates an already instantiated expression, making sure that all its mandatory arguments
      - 995        are set.
      - 996
      - 997        Args:
      - 998            expression: the expression to validate.
      - 999            args: an optional list of items that was used to instantiate the expression, if it's a Func.
      -1000        """
      -1001        if self.error_level == ErrorLevel.IGNORE:
      -1002            return
      -1003
      -1004        for error_message in expression.error_messages(args):
      -1005            self.raise_error(error_message)
      -1006
      -1007    def _find_sql(self, start: Token, end: Token) -> str:
      -1008        return self.sql[start.start : end.end + 1]
      -1009
      -1010    def _advance(self, times: int = 1) -> None:
      -1011        self._index += times
      -1012        self._curr = seq_get(self._tokens, self._index)
      -1013        self._next = seq_get(self._tokens, self._index + 1)
      -1014        if self._index > 0:
      -1015            self._prev = self._tokens[self._index - 1]
      -1016            self._prev_comments = self._prev.comments
      -1017        else:
      -1018            self._prev = None
      -1019            self._prev_comments = None
      + 950    def raise_error(self, message: str, token: t.Optional[Token] = None) -> None:
      + 951        """
      + 952        Appends an error to the list of recorded errors or raises it, depending on the chosen
      + 953        error level setting.
      + 954        """
      + 955        token = token or self._curr or self._prev or Token.string("")
      + 956        start = token.start
      + 957        end = token.end + 1
      + 958        start_context = self.sql[max(start - self.error_message_context, 0) : start]
      + 959        highlight = self.sql[start:end]
      + 960        end_context = self.sql[end : end + self.error_message_context]
      + 961
      + 962        error = ParseError.new(
      + 963            f"{message}. Line {token.line}, Col: {token.col}.\n"
      + 964            f"  {start_context}\033[4m{highlight}\033[0m{end_context}",
      + 965            description=message,
      + 966            line=token.line,
      + 967            col=token.col,
      + 968            start_context=start_context,
      + 969            highlight=highlight,
      + 970            end_context=end_context,
      + 971        )
      + 972
      + 973        if self.error_level == ErrorLevel.IMMEDIATE:
      + 974            raise error
      + 975
      + 976        self.errors.append(error)
      + 977
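Taken together, the two methods above give the four error modes: IMMEDIATE raises inside `raise_error`, RAISE defers to `check_errors` after each statement, WARN only logs, and IGNORE records errors without acting on them. A small sketch of the lenient path, reusing the imports from the first example:

    lenient = Parser(error_level=ErrorLevel.WARN, max_errors=5)

    # "SELECT (1" is missing its closing paren: the error is recorded, logged by
    # check_errors(), and a best-effort list of trees is still returned.
    trees = lenient.parse(Tokenizer().tokenize("SELECT (1"), "SELECT (1")
    print(lenient.errors)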
      + 978    def expression(
      + 979        self, exp_class: t.Type[E], comments: t.Optional[t.List[str]] = None, **kwargs
      + 980    ) -> E:
      + 981        """
      + 982        Creates a new, validated Expression.
      + 983
      + 984        Args:
      + 985            exp_class: The expression class to instantiate.
      + 986            comments: An optional list of comments to attach to the expression.
      + 987            kwargs: The arguments to set for the expression along with their respective values.
      + 988
      + 989        Returns:
      + 990            The target expression.
      + 991        """
      + 992        instance = exp_class(**kwargs)
      + 993        instance.add_comments(comments) if comments else self._add_comments(instance)
      + 994        return self.validate_expression(instance)
      + 995
      + 996    def _add_comments(self, expression: t.Optional[exp.Expression]) -> None:
      + 997        if expression and self._prev_comments:
      + 998            expression.add_comments(self._prev_comments)
      + 999            self._prev_comments = None
      +1000
      +1001    def validate_expression(self, expression: E, args: t.Optional[t.List] = None) -> E:
      +1002        """
      +1003        Validates an Expression, making sure that all its mandatory arguments are set.
      +1004
      +1005        Args:
      +1006            expression: The expression to validate.
      +1007            args: An optional list of items that was used to instantiate the expression, if it's a Func.
      +1008
      +1009        Returns:
      +1010            The validated expression.
      +1011        """
      +1012        if self.error_level != ErrorLevel.IGNORE:
      +1013            for error_message in expression.error_messages(args):
      +1014                self.raise_error(error_message)
      +1015
      +1016        return expression
      +1017
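A side effect of the refactor above is that `validate_expression` now returns the node it checked, which is what lets `expression()` end with a single `return`. A hypothetical dialect hook, sketching the typical way nodes are built through this helper:

    class MyParser(Parser):
        # Hypothetical helper: builds an exp.Hint node, attaches any pending
        # comments, and returns the validated instance.
        def _parse_my_hint(self) -> exp.Expression:
            return self.expression(exp.Hint, expressions=[self._parse_id_var()])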
      +1018    def _find_sql(self, start: Token, end: Token) -> str:
      +1019        return self.sql[start.start : end.end + 1]
       1020
      -1021    def _retreat(self, index: int) -> None:
      -1022        if index != self._index:
      -1023            self._advance(index - self._index)
      -1024
      -1025    def _parse_command(self) -> exp.Command:
      -1026        return self.expression(exp.Command, this=self._prev.text, expression=self._parse_string())
      -1027
      -1028    def _parse_comment(self, allow_exists: bool = True) -> exp.Expression:
      -1029        start = self._prev
      -1030        exists = self._parse_exists() if allow_exists else None
      -1031
      -1032        self._match(TokenType.ON)
      -1033
      -1034        kind = self._match_set(self.CREATABLES) and self._prev
      -1035
      -1036        if not kind:
      -1037            return self._parse_as_command(start)
      -1038
      -1039        if kind.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE):
      -1040            this = self._parse_user_defined_function(kind=kind.token_type)
      -1041        elif kind.token_type == TokenType.TABLE:
      -1042            this = self._parse_table(alias_tokens=self.COMMENT_TABLE_ALIAS_TOKENS)
      -1043        elif kind.token_type == TokenType.COLUMN:
      -1044            this = self._parse_column()
      -1045        else:
      -1046            this = self._parse_id_var()
      -1047
      -1048        self._match(TokenType.IS)
      +1021    def _advance(self, times: int = 1) -> None:
      +1022        self._index += times
      +1023        self._curr = seq_get(self._tokens, self._index)
      +1024        self._next = seq_get(self._tokens, self._index + 1)
      +1025
      +1026        if self._index > 0:
      +1027            self._prev = self._tokens[self._index - 1]
      +1028            self._prev_comments = self._prev.comments
      +1029        else:
      +1030            self._prev = None
      +1031            self._prev_comments = None
      +1032
      +1033    def _retreat(self, index: int) -> None:
      +1034        if index != self._index:
      +1035            self._advance(index - self._index)
      +1036
      +1037    def _parse_command(self) -> exp.Command:
      +1038        return self.expression(exp.Command, this=self._prev.text, expression=self._parse_string())
      +1039
      +1040    def _parse_comment(self, allow_exists: bool = True) -> exp.Expression:
      +1041        start = self._prev
      +1042        exists = self._parse_exists() if allow_exists else None
      +1043
      +1044        self._match(TokenType.ON)
      +1045
      +1046        kind = self._match_set(self.CREATABLES) and self._prev
      +1047        if not kind:
      +1048            return self._parse_as_command(start)
       1049
      -1050        return self.expression(
      -1051            exp.Comment, this=this, kind=kind.text, expression=self._parse_string(), exists=exists
      -1052        )
      -1053
      -1054    # https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#mergetree-table-ttl
      -1055    def _parse_ttl(self) -> exp.Expression:
      -1056        def _parse_ttl_action() -> t.Optional[exp.Expression]:
      -1057            this = self._parse_bitwise()
      +1050        if kind.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE):
      +1051            this = self._parse_user_defined_function(kind=kind.token_type)
      +1052        elif kind.token_type == TokenType.TABLE:
      +1053            this = self._parse_table(alias_tokens=self.COMMENT_TABLE_ALIAS_TOKENS)
      +1054        elif kind.token_type == TokenType.COLUMN:
      +1055            this = self._parse_column()
      +1056        else:
      +1057            this = self._parse_id_var()
       1058
      -1059            if self._match_text_seq("DELETE"):
      -1060                return self.expression(exp.MergeTreeTTLAction, this=this, delete=True)
      -1061            if self._match_text_seq("RECOMPRESS"):
      -1062                return self.expression(
      -1063                    exp.MergeTreeTTLAction, this=this, recompress=self._parse_bitwise()
      -1064                )
      -1065            if self._match_text_seq("TO", "DISK"):
      -1066                return self.expression(
      -1067                    exp.MergeTreeTTLAction, this=this, to_disk=self._parse_string()
      -1068                )
      -1069            if self._match_text_seq("TO", "VOLUME"):
      -1070                return self.expression(
      -1071                    exp.MergeTreeTTLAction, this=this, to_volume=self._parse_string()
      -1072                )
      -1073
      -1074            return this
      +1059        self._match(TokenType.IS)
      +1060
      +1061        return self.expression(
      +1062            exp.Comment, this=this, kind=kind.text, expression=self._parse_string(), exists=exists
      +1063        )
      +1064
      +1065    def _parse_to_table(
      +1066        self,
      +1067    ) -> exp.ToTableProperty:
      +1068        table = self._parse_table_parts(schema=True)
      +1069        return self.expression(exp.ToTableProperty, this=table)
      +1070
      +1071    # https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#mergetree-table-ttl
      +1072    def _parse_ttl(self) -> exp.Expression:
      +1073        def _parse_ttl_action() -> t.Optional[exp.Expression]:
      +1074            this = self._parse_bitwise()
       1075
      -1076        expressions = self._parse_csv(_parse_ttl_action)
      -1077        where = self._parse_where()
      -1078        group = self._parse_group()
      -1079
      -1080        aggregates = None
      -1081        if group and self._match(TokenType.SET):
      -1082            aggregates = self._parse_csv(self._parse_set_item)
      -1083
      -1084        return self.expression(
      -1085            exp.MergeTreeTTL,
      -1086            expressions=expressions,
      -1087            where=where,
      -1088            group=group,
      -1089            aggregates=aggregates,
      -1090        )
      -1091
      -1092    def _parse_statement(self) -> t.Optional[exp.Expression]:
      -1093        if self._curr is None:
      -1094            return None
      -1095
      -1096        if self._match_set(self.STATEMENT_PARSERS):
      -1097            return self.STATEMENT_PARSERS[self._prev.token_type](self)
      -1098
      -1099        if self._match_set(Tokenizer.COMMANDS):
      -1100            return self._parse_command()
      -1101
      -1102        expression = self._parse_expression()
      -1103        expression = self._parse_set_operations(expression) if expression else self._parse_select()
      -1104        return self._parse_query_modifiers(expression)
      -1105
      -1106    def _parse_drop(self) -> t.Optional[exp.Drop | exp.Command]:
      -1107        start = self._prev
      -1108        temporary = self._match(TokenType.TEMPORARY)
      -1109        materialized = self._match_text_seq("MATERIALIZED")
      -1110        kind = self._match_set(self.CREATABLES) and self._prev.text
      -1111        if not kind:
      -1112            return self._parse_as_command(start)
      -1113
      -1114        return self.expression(
      -1115            exp.Drop,
      -1116            exists=self._parse_exists(),
      -1117            this=self._parse_table(schema=True),
      -1118            kind=kind,
      -1119            temporary=temporary,
      -1120            materialized=materialized,
      -1121            cascade=self._match_text_seq("CASCADE"),
      -1122            constraints=self._match_text_seq("CONSTRAINTS"),
      -1123            purge=self._match_text_seq("PURGE"),
      -1124        )
      -1125
      -1126    def _parse_exists(self, not_: bool = False) -> t.Optional[bool]:
      -1127        return (
      -1128            self._match(TokenType.IF)
      -1129            and (not not_ or self._match(TokenType.NOT))
      -1130            and self._match(TokenType.EXISTS)
      -1131        )
      -1132
      -1133    def _parse_create(self) -> t.Optional[exp.Expression]:
      -1134        start = self._prev
      -1135        replace = self._prev.text.upper() == "REPLACE" or self._match_pair(
      -1136            TokenType.OR, TokenType.REPLACE
      -1137        )
      -1138        unique = self._match(TokenType.UNIQUE)
      -1139
      -1140        if self._match_pair(TokenType.TABLE, TokenType.FUNCTION, advance=False):
      -1141            self._match(TokenType.TABLE)
      -1142
      -1143        properties = None
      -1144        create_token = self._match_set(self.CREATABLES) and self._prev
      -1145
      -1146        if not create_token:
      -1147            properties = self._parse_properties()  # exp.Properties.Location.POST_CREATE
      -1148            create_token = self._match_set(self.CREATABLES) and self._prev
      -1149
      -1150            if not properties or not create_token:
      -1151                return self._parse_as_command(start)
      -1152
      -1153        exists = self._parse_exists(not_=True)
      -1154        this = None
      -1155        expression = None
      -1156        indexes = None
      -1157        no_schema_binding = None
      -1158        begin = None
      -1159        clone = None
      -1160
      -1161        if create_token.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE):
      -1162            this = self._parse_user_defined_function(kind=create_token.token_type)
      -1163            temp_properties = self._parse_properties()
      -1164            if properties and temp_properties:
      -1165                properties.expressions.extend(temp_properties.expressions)
      -1166            elif temp_properties:
      -1167                properties = temp_properties
      -1168
      -1169            self._match(TokenType.ALIAS)
      -1170            begin = self._match(TokenType.BEGIN)
      -1171            return_ = self._match_text_seq("RETURN")
      -1172            expression = self._parse_statement()
      -1173
      -1174            if return_:
      -1175                expression = self.expression(exp.Return, this=expression)
      -1176        elif create_token.token_type == TokenType.INDEX:
      -1177            this = self._parse_index(index=self._parse_id_var())
      -1178        elif create_token.token_type in self.DB_CREATABLES:
      -1179            table_parts = self._parse_table_parts(schema=True)
      +1076            if self._match_text_seq("DELETE"):
      +1077                return self.expression(exp.MergeTreeTTLAction, this=this, delete=True)
      +1078            if self._match_text_seq("RECOMPRESS"):
      +1079                return self.expression(
      +1080                    exp.MergeTreeTTLAction, this=this, recompress=self._parse_bitwise()
      +1081                )
      +1082            if self._match_text_seq("TO", "DISK"):
      +1083                return self.expression(
      +1084                    exp.MergeTreeTTLAction, this=this, to_disk=self._parse_string()
      +1085                )
      +1086            if self._match_text_seq("TO", "VOLUME"):
      +1087                return self.expression(
      +1088                    exp.MergeTreeTTLAction, this=this, to_volume=self._parse_string()
      +1089                )
      +1090
      +1091            return this
      +1092
      +1093        expressions = self._parse_csv(_parse_ttl_action)
      +1094        where = self._parse_where()
      +1095        group = self._parse_group()
      +1096
      +1097        aggregates = None
      +1098        if group and self._match(TokenType.SET):
      +1099            aggregates = self._parse_csv(self._parse_set_item)
      +1100
      +1101        return self.expression(
      +1102            exp.MergeTreeTTL,
      +1103            expressions=expressions,
      +1104            where=where,
      +1105            group=group,
      +1106            aggregates=aggregates,
      +1107        )
      +1108
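For context, the clause `_parse_ttl` targets (per the ClickHouse MergeTree docs linked above) looks roughly like the following, with the optional WHERE, GROUP BY and SET parts mapping to the `where`, `group` and `aggregates` args of `exp.MergeTreeTTL`:

    TTL d + INTERVAL 1 MONTH DELETE,
        d + INTERVAL 1 WEEK TO VOLUME 'aaa',
        d + INTERVAL 2 WEEK TO DISK 'bbb'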
      +1109    def _parse_statement(self) -> t.Optional[exp.Expression]:
      +1110        if self._curr is None:
      +1111            return None
      +1112
      +1113        if self._match_set(self.STATEMENT_PARSERS):
      +1114            return self.STATEMENT_PARSERS[self._prev.token_type](self)
      +1115
      +1116        if self._match_set(Tokenizer.COMMANDS):
      +1117            return self._parse_command()
      +1118
      +1119        expression = self._parse_expression()
      +1120        expression = self._parse_set_operations(expression) if expression else self._parse_select()
      +1121        return self._parse_query_modifiers(expression)
      +1122
      +1123    def _parse_drop(self) -> exp.Drop | exp.Command:
      +1124        start = self._prev
      +1125        temporary = self._match(TokenType.TEMPORARY)
      +1126        materialized = self._match_text_seq("MATERIALIZED")
      +1127
      +1128        kind = self._match_set(self.CREATABLES) and self._prev.text
      +1129        if not kind:
      +1130            return self._parse_as_command(start)
      +1131
      +1132        return self.expression(
      +1133            exp.Drop,
      +1134            exists=self._parse_exists(),
      +1135            this=self._parse_table(schema=True),
      +1136            kind=kind,
      +1137            temporary=temporary,
      +1138            materialized=materialized,
      +1139            cascade=self._match_text_seq("CASCADE"),
      +1140            constraints=self._match_text_seq("CONSTRAINTS"),
      +1141            purge=self._match_text_seq("PURGE"),
      +1142        )
      +1143
      +1144    def _parse_exists(self, not_: bool = False) -> t.Optional[bool]:
      +1145        return (
      +1146            self._match(TokenType.IF)
      +1147            and (not not_ or self._match(TokenType.NOT))
      +1148            and self._match(TokenType.EXISTS)
      +1149        )
      +1150
      +1151    def _parse_create(self) -> exp.Create | exp.Command:
      +1152        # Note: this can't be None because we've matched a statement parser
      +1153        start = self._prev
      +1154        replace = start.text.upper() == "REPLACE" or self._match_pair(
      +1155            TokenType.OR, TokenType.REPLACE
      +1156        )
      +1157        unique = self._match(TokenType.UNIQUE)
      +1158
      +1159        if self._match_pair(TokenType.TABLE, TokenType.FUNCTION, advance=False):
      +1160            self._advance()
      +1161
      +1162        properties = None
      +1163        create_token = self._match_set(self.CREATABLES) and self._prev
      +1164
      +1165        if not create_token:
      +1166            # exp.Properties.Location.POST_CREATE
      +1167            properties = self._parse_properties()
      +1168            create_token = self._match_set(self.CREATABLES) and self._prev
      +1169
      +1170            if not properties or not create_token:
      +1171                return self._parse_as_command(start)
      +1172
      +1173        exists = self._parse_exists(not_=True)
      +1174        this = None
      +1175        expression = None
      +1176        indexes = None
      +1177        no_schema_binding = None
      +1178        begin = None
      +1179        clone = None
       1180
      -1181            # exp.Properties.Location.POST_NAME
      -1182            if self._match(TokenType.COMMA):
      -1183                temp_properties = self._parse_properties(before=True)
      -1184                if properties and temp_properties:
      -1185                    properties.expressions.extend(temp_properties.expressions)
      -1186                elif temp_properties:
      -1187                    properties = temp_properties
      -1188
      -1189            this = self._parse_schema(this=table_parts)
      +1181        def extend_props(temp_props: t.Optional[exp.Properties]) -> None:
      +1182            nonlocal properties
      +1183            if properties and temp_props:
      +1184                properties.expressions.extend(temp_props.expressions)
      +1185            elif temp_props:
      +1186                properties = temp_props
      +1187
      +1188        if create_token.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE):
      +1189            this = self._parse_user_defined_function(kind=create_token.token_type)
       1190
      -1191            # exp.Properties.Location.POST_SCHEMA and POST_WITH
      -1192            temp_properties = self._parse_properties()
      -1193            if properties and temp_properties:
      -1194                properties.expressions.extend(temp_properties.expressions)
      -1195            elif temp_properties:
      -1196                properties = temp_properties
      -1197
      -1198            self._match(TokenType.ALIAS)
      -1199
      -1200            # exp.Properties.Location.POST_ALIAS
      -1201            if not (
      -1202                self._match(TokenType.SELECT, advance=False)
      -1203                or self._match(TokenType.WITH, advance=False)
      -1204                or self._match(TokenType.L_PAREN, advance=False)
      -1205            ):
      -1206                temp_properties = self._parse_properties()
      -1207                if properties and temp_properties:
      -1208                    properties.expressions.extend(temp_properties.expressions)
      -1209                elif temp_properties:
      -1210                    properties = temp_properties
      +1191            # exp.Properties.Location.POST_SCHEMA ("schema" here is the UDF's type signature)
      +1192            extend_props(self._parse_properties())
      +1193
      +1194            self._match(TokenType.ALIAS)
      +1195            begin = self._match(TokenType.BEGIN)
      +1196            return_ = self._match_text_seq("RETURN")
      +1197            expression = self._parse_statement()
      +1198
      +1199            if return_:
      +1200                expression = self.expression(exp.Return, this=expression)
      +1201        elif create_token.token_type == TokenType.INDEX:
      +1202            this = self._parse_index(index=self._parse_id_var())
      +1203        elif create_token.token_type in self.DB_CREATABLES:
      +1204            table_parts = self._parse_table_parts(schema=True)
      +1205
      +1206            # exp.Properties.Location.POST_NAME
      +1207            self._match(TokenType.COMMA)
      +1208            extend_props(self._parse_properties(before=True))
      +1209
      +1210            this = self._parse_schema(this=table_parts)
       1211
      -1212            expression = self._parse_ddl_select()
      -1213
      -1214            if create_token.token_type == TokenType.TABLE:
      -1215                indexes = []
      -1216                while True:
      -1217                    index = self._parse_index()
      -1218
      -1219                    # exp.Properties.Location.POST_EXPRESSION or exp.Properties.Location.POST_INDEX
      -1220                    temp_properties = self._parse_properties()
      -1221                    if properties and temp_properties:
      -1222                        properties.expressions.extend(temp_properties.expressions)
      -1223                    elif temp_properties:
      -1224                        properties = temp_properties
      -1225
      -1226                    if not index:
      -1227                        break
      -1228                    else:
      -1229                        self._match(TokenType.COMMA)
      -1230                        indexes.append(index)
      -1231            elif create_token.token_type == TokenType.VIEW:
      -1232                if self._match_text_seq("WITH", "NO", "SCHEMA", "BINDING"):
      -1233                    no_schema_binding = True
      -1234
      -1235            if self._match_text_seq("CLONE"):
      -1236                clone = self._parse_table(schema=True)
      -1237                when = self._match_texts({"AT", "BEFORE"}) and self._prev.text.upper()
      -1238                clone_kind = (
      -1239                    self._match(TokenType.L_PAREN)
      -1240                    and self._match_texts(self.CLONE_KINDS)
      -1241                    and self._prev.text.upper()
      -1242                )
      -1243                clone_expression = self._match(TokenType.FARROW) and self._parse_bitwise()
      -1244                self._match(TokenType.R_PAREN)
      -1245                clone = self.expression(
      -1246                    exp.Clone, this=clone, when=when, kind=clone_kind, expression=clone_expression
      -1247                )
      -1248
      -1249        return self.expression(
      -1250            exp.Create,
      -1251            this=this,
      -1252            kind=create_token.text,
      -1253            replace=replace,
      -1254            unique=unique,
      -1255            expression=expression,
      -1256            exists=exists,
      -1257            properties=properties,
      -1258            indexes=indexes,
      -1259            no_schema_binding=no_schema_binding,
      -1260            begin=begin,
      -1261            clone=clone,
      -1262        )
      -1263
      -1264    def _parse_property_before(self) -> t.Optional[exp.Expression]:
      -1265        # only used for teradata currently
      -1266        self._match(TokenType.COMMA)
      +1212            # exp.Properties.Location.POST_SCHEMA and POST_WITH
      +1213            extend_props(self._parse_properties())
      +1214
      +1215            self._match(TokenType.ALIAS)
      +1216            if not self._match_set(self.DDL_SELECT_TOKENS, advance=False):
      +1217                # exp.Properties.Location.POST_ALIAS
      +1218                extend_props(self._parse_properties())
      +1219
      +1220            expression = self._parse_ddl_select()
      +1221
      +1222            if create_token.token_type == TokenType.TABLE:
      +1223                indexes = []
      +1224                while True:
      +1225                    index = self._parse_index()
      +1226
      +1227                    # exp.Properties.Location.POST_EXPRESSION and POST_INDEX
      +1228                    extend_props(self._parse_properties())
      +1229
      +1230                    if not index:
      +1231                        break
      +1232                    else:
      +1233                        self._match(TokenType.COMMA)
      +1234                        indexes.append(index)
      +1235            elif create_token.token_type == TokenType.VIEW:
      +1236                if self._match_text_seq("WITH", "NO", "SCHEMA", "BINDING"):
      +1237                    no_schema_binding = True
      +1238
      +1239            if self._match_text_seq("CLONE"):
      +1240                clone = self._parse_table(schema=True)
      +1241                when = self._match_texts({"AT", "BEFORE"}) and self._prev.text.upper()
      +1242                clone_kind = (
      +1243                    self._match(TokenType.L_PAREN)
      +1244                    and self._match_texts(self.CLONE_KINDS)
      +1245                    and self._prev.text.upper()
      +1246                )
      +1247                clone_expression = self._match(TokenType.FARROW) and self._parse_bitwise()
      +1248                self._match(TokenType.R_PAREN)
      +1249                clone = self.expression(
      +1250                    exp.Clone, this=clone, when=when, kind=clone_kind, expression=clone_expression
      +1251                )
      +1252
      +1253        return self.expression(
      +1254            exp.Create,
      +1255            this=this,
      +1256            kind=create_token.text,
      +1257            replace=replace,
      +1258            unique=unique,
      +1259            expression=expression,
      +1260            exists=exists,
      +1261            properties=properties,
      +1262            indexes=indexes,
      +1263            no_schema_binding=no_schema_binding,
      +1264            begin=begin,
      +1265            clone=clone,
      +1266        )
       1267
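The repeated `temp_properties` merging blocks of the old `_parse_create` are folded into the local `extend_props` closure above. A standalone minimal sketch of the same `nonlocal` accumulator pattern:

    def collect():
        merged = None

        def extend(batch):
            # Mirrors extend_props: merge when both exist, otherwise adopt the
            # new batch as the accumulator (None batches are ignored).
            nonlocal merged
            if merged and batch:
                merged.extend(batch)
            elif batch:
                merged = batch

        extend(None)        # ignored
        extend(["a", "b"])  # becomes the accumulator
        extend(["c"])       # merged into it
        return merged       # ["a", "b", "c"]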
      -1268        kwargs = {
      -1269            "no": self._match_text_seq("NO"),
      -1270            "dual": self._match_text_seq("DUAL"),
      -1271            "before": self._match_text_seq("BEFORE"),
      -1272            "default": self._match_text_seq("DEFAULT"),
      -1273            "local": (self._match_text_seq("LOCAL") and "LOCAL")
      -1274            or (self._match_text_seq("NOT", "LOCAL") and "NOT LOCAL"),
      -1275            "after": self._match_text_seq("AFTER"),
      -1276            "minimum": self._match_texts(("MIN", "MINIMUM")),
      -1277            "maximum": self._match_texts(("MAX", "MAXIMUM")),
      -1278        }
      -1279
      -1280        if self._match_texts(self.PROPERTY_PARSERS):
      -1281            parser = self.PROPERTY_PARSERS[self._prev.text.upper()]
      -1282            try:
      -1283                return parser(self, **{k: v for k, v in kwargs.items() if v})
      -1284            except TypeError:
      -1285                self.raise_error(f"Cannot parse property '{self._prev.text}'")
      -1286
      -1287        return None
      -1288
      -1289    def _parse_property(self) -> t.Optional[exp.Expression]:
      -1290        if self._match_texts(self.PROPERTY_PARSERS):
      -1291            return self.PROPERTY_PARSERS[self._prev.text.upper()](self)
      +1268    def _parse_property_before(self) -> t.Optional[exp.Expression]:
      +1269        # only used for teradata currently
      +1270        self._match(TokenType.COMMA)
      +1271
      +1272        kwargs = {
      +1273            "no": self._match_text_seq("NO"),
      +1274            "dual": self._match_text_seq("DUAL"),
      +1275            "before": self._match_text_seq("BEFORE"),
      +1276            "default": self._match_text_seq("DEFAULT"),
      +1277            "local": (self._match_text_seq("LOCAL") and "LOCAL")
      +1278            or (self._match_text_seq("NOT", "LOCAL") and "NOT LOCAL"),
      +1279            "after": self._match_text_seq("AFTER"),
      +1280            "minimum": self._match_texts(("MIN", "MINIMUM")),
      +1281            "maximum": self._match_texts(("MAX", "MAXIMUM")),
      +1282        }
      +1283
      +1284        if self._match_texts(self.PROPERTY_PARSERS):
      +1285            parser = self.PROPERTY_PARSERS[self._prev.text.upper()]
      +1286            try:
      +1287                return parser(self, **{k: v for k, v in kwargs.items() if v})
      +1288            except TypeError:
      +1289                self.raise_error(f"Cannot parse property '{self._prev.text}'")
      +1290
      +1291        return None
       1292
      -1293        if self._match_pair(TokenType.DEFAULT, TokenType.CHARACTER_SET):
      -1294            return self._parse_character_set(default=True)
      -1295
      -1296        if self._match_text_seq("COMPOUND", "SORTKEY"):
      -1297            return self._parse_sortkey(compound=True)
      -1298
      -1299        if self._match_text_seq("SQL", "SECURITY"):
      -1300            return self.expression(exp.SqlSecurityProperty, definer=self._match_text_seq("DEFINER"))
      -1301
      -1302        assignment = self._match_pair(
      -1303            TokenType.VAR, TokenType.EQ, advance=False
      -1304        ) or self._match_pair(TokenType.STRING, TokenType.EQ, advance=False)
      +1293    def _parse_property(self) -> t.Optional[exp.Expression]:
      +1294        if self._match_texts(self.PROPERTY_PARSERS):
      +1295            return self.PROPERTY_PARSERS[self._prev.text.upper()](self)
      +1296
      +1297        if self._match_pair(TokenType.DEFAULT, TokenType.CHARACTER_SET):
      +1298            return self._parse_character_set(default=True)
      +1299
      +1300        if self._match_text_seq("COMPOUND", "SORTKEY"):
      +1301            return self._parse_sortkey(compound=True)
      +1302
      +1303        if self._match_text_seq("SQL", "SECURITY"):
      +1304            return self.expression(exp.SqlSecurityProperty, definer=self._match_text_seq("DEFINER"))
       1305
      -1306        if assignment:
      -1307            key = self._parse_var_or_string()
      -1308            self._match(TokenType.EQ)
      -1309            return self.expression(exp.Property, this=key, value=self._parse_column())
      -1310
      -1311        return None
      -1312
      -1313    def _parse_stored(self) -> exp.Expression:
      -1314        self._match(TokenType.ALIAS)
      -1315
      -1316        input_format = self._parse_string() if self._match_text_seq("INPUTFORMAT") else None
      -1317        output_format = self._parse_string() if self._match_text_seq("OUTPUTFORMAT") else None
      -1318
      -1319        return self.expression(
      -1320            exp.FileFormatProperty,
      -1321            this=self.expression(
      -1322                exp.InputOutputFormat, input_format=input_format, output_format=output_format
      -1323            )
      -1324            if input_format or output_format
      -1325            else self._parse_var_or_string() or self._parse_number() or self._parse_id_var(),
      -1326        )
      -1327
      -1328    def _parse_property_assignment(self, exp_class: t.Type[exp.Expression]) -> exp.Expression:
      -1329        self._match(TokenType.EQ)
      -1330        self._match(TokenType.ALIAS)
      -1331        return self.expression(exp_class, this=self._parse_field())
      -1332
      -1333    def _parse_properties(self, before: t.Optional[bool] = None) -> t.Optional[exp.Expression]:
      -1334        properties = []
      -1335
      -1336        while True:
      -1337            if before:
      -1338                prop = self._parse_property_before()
      -1339            else:
      -1340                prop = self._parse_property()
      -1341
      -1342            if not prop:
      -1343                break
      -1344            for p in ensure_list(prop):
      -1345                properties.append(p)
      -1346
      -1347        if properties:
      -1348            return self.expression(exp.Properties, expressions=properties)
      +1306        assignment = self._match_pair(
      +1307            TokenType.VAR, TokenType.EQ, advance=False
      +1308        ) or self._match_pair(TokenType.STRING, TokenType.EQ, advance=False)
      +1309
      +1310        if assignment:
      +1311            key = self._parse_var_or_string()
      +1312            self._match(TokenType.EQ)
      +1313            return self.expression(exp.Property, this=key, value=self._parse_column())
      +1314
      +1315        return None
      +1316
      +1317    def _parse_stored(self) -> exp.FileFormatProperty:
      +1318        self._match(TokenType.ALIAS)
      +1319
      +1320        input_format = self._parse_string() if self._match_text_seq("INPUTFORMAT") else None
      +1321        output_format = self._parse_string() if self._match_text_seq("OUTPUTFORMAT") else None
      +1322
      +1323        return self.expression(
      +1324            exp.FileFormatProperty,
      +1325            this=self.expression(
      +1326                exp.InputOutputFormat, input_format=input_format, output_format=output_format
      +1327            )
      +1328            if input_format or output_format
      +1329            else self._parse_var_or_string() or self._parse_number() or self._parse_id_var(),
      +1330        )
      +1331
      +1332    def _parse_property_assignment(self, exp_class: t.Type[E]) -> E:
      +1333        self._match(TokenType.EQ)
      +1334        self._match(TokenType.ALIAS)
      +1335        return self.expression(exp_class, this=self._parse_field())
      +1336
      +1337    def _parse_properties(self, before: t.Optional[bool] = None) -> t.Optional[exp.Properties]:
      +1338        properties = []
      +1339        while True:
      +1340            if before:
      +1341                prop = self._parse_property_before()
      +1342            else:
      +1343                prop = self._parse_property()
      +1344
      +1345            if not prop:
      +1346                break
      +1347            for p in ensure_list(prop):
      +1348                properties.append(p)
       1349
      -1350        return None
      -1351
      -1352    def _parse_fallback(self, no: bool = False) -> exp.Expression:
      -1353        return self.expression(
      -1354            exp.FallbackProperty, no=no, protection=self._match_text_seq("PROTECTION")
      -1355        )
      -1356
      -1357    def _parse_volatile_property(self) -> exp.Expression:
      -1358        if self._index >= 2:
      -1359            pre_volatile_token = self._tokens[self._index - 2]
      -1360        else:
      -1361            pre_volatile_token = None
      -1362
      -1363        if pre_volatile_token and pre_volatile_token.token_type in (
      -1364            TokenType.CREATE,
      -1365            TokenType.REPLACE,
      -1366            TokenType.UNIQUE,
      -1367        ):
      -1368            return exp.VolatileProperty()
      -1369
      -1370        return self.expression(exp.StabilityProperty, this=exp.Literal.string("VOLATILE"))
      -1371
      -1372    def _parse_with_property(
      -1373        self,
      -1374    ) -> t.Union[t.Optional[exp.Expression], t.List[t.Optional[exp.Expression]]]:
      -1375        self._match(TokenType.WITH)
      -1376        if self._match(TokenType.L_PAREN, advance=False):
      -1377            return self._parse_wrapped_csv(self._parse_property)
      -1378
      -1379        if self._match_text_seq("JOURNAL"):
      -1380            return self._parse_withjournaltable()
      -1381
      -1382        if self._match_text_seq("DATA"):
      -1383            return self._parse_withdata(no=False)
      -1384        elif self._match_text_seq("NO", "DATA"):
      -1385            return self._parse_withdata(no=True)
      -1386
      -1387        if not self._next:
      -1388            return None
      -1389
      -1390        return self._parse_withisolatedloading()
      -1391
      -1392    # https://dev.mysql.com/doc/refman/8.0/en/create-view.html
      -1393    def _parse_definer(self) -> t.Optional[exp.Expression]:
      -1394        self._match(TokenType.EQ)
      -1395
      -1396        user = self._parse_id_var()
      -1397        self._match(TokenType.PARAMETER)
      -1398        host = self._parse_id_var() or (self._match(TokenType.MOD) and self._prev.text)
      -1399
      -1400        if not user or not host:
      -1401            return None
      -1402
      -1403        return exp.DefinerProperty(this=f"{user}@{host}")
      -1404
      -1405    def _parse_withjournaltable(self) -> exp.Expression:
      -1406        self._match(TokenType.TABLE)
      -1407        self._match(TokenType.EQ)
      -1408        return self.expression(exp.WithJournalTableProperty, this=self._parse_table_parts())
      -1409
      -1410    def _parse_log(self, no: bool = False) -> exp.Expression:
      -1411        return self.expression(exp.LogProperty, no=no)
      -1412
      -1413    def _parse_journal(self, **kwargs) -> exp.Expression:
      -1414        return self.expression(exp.JournalProperty, **kwargs)
      -1415
      -1416    def _parse_checksum(self) -> exp.Expression:
      -1417        self._match(TokenType.EQ)
      -1418
      -1419        on = None
      -1420        if self._match(TokenType.ON):
      -1421            on = True
      -1422        elif self._match_text_seq("OFF"):
      -1423            on = False
      -1424        default = self._match(TokenType.DEFAULT)
      +1350        if properties:
      +1351            return self.expression(exp.Properties, expressions=properties)
      +1352
      +1353        return None
      +1354
      +1355    def _parse_fallback(self, no: bool = False) -> exp.FallbackProperty:
      +1356        return self.expression(
      +1357            exp.FallbackProperty, no=no, protection=self._match_text_seq("PROTECTION")
      +1358        )
      +1359
      +1360    def _parse_volatile_property(self) -> exp.VolatileProperty | exp.StabilityProperty:
      +1361        if self._index >= 2:
      +1362            pre_volatile_token = self._tokens[self._index - 2]
      +1363        else:
      +1364            pre_volatile_token = None
      +1365
      +1366        if pre_volatile_token and pre_volatile_token.token_type in self.PRE_VOLATILE_TOKENS:
      +1367            return exp.VolatileProperty()
      +1368
      +1369        return self.expression(exp.StabilityProperty, this=exp.Literal.string("VOLATILE"))
      +1370
      +1371    def _parse_with_property(
      +1372        self,
      +1373    ) -> t.Optional[exp.Expression] | t.List[t.Optional[exp.Expression]]:
      +1374        self._match(TokenType.WITH)
      +1375        if self._match(TokenType.L_PAREN, advance=False):
      +1376            return self._parse_wrapped_csv(self._parse_property)
      +1377
      +1378        if self._match_text_seq("JOURNAL"):
      +1379            return self._parse_withjournaltable()
      +1380
      +1381        if self._match_text_seq("DATA"):
      +1382            return self._parse_withdata(no=False)
      +1383        elif self._match_text_seq("NO", "DATA"):
      +1384            return self._parse_withdata(no=True)
      +1385
      +1386        if not self._next:
      +1387            return None
      +1388
      +1389        return self._parse_withisolatedloading()
      +1390
      +1391    # https://dev.mysql.com/doc/refman/8.0/en/create-view.html
      +1392    def _parse_definer(self) -> t.Optional[exp.DefinerProperty]:
      +1393        self._match(TokenType.EQ)
      +1394
      +1395        user = self._parse_id_var()
      +1396        self._match(TokenType.PARAMETER)
      +1397        host = self._parse_id_var() or (self._match(TokenType.MOD) and self._prev.text)
      +1398
      +1399        if not user or not host:
      +1400            return None
      +1401
      +1402        return exp.DefinerProperty(this=f"{user}@{host}")
      +1403
      +1404    def _parse_withjournaltable(self) -> exp.WithJournalTableProperty:
      +1405        self._match(TokenType.TABLE)
      +1406        self._match(TokenType.EQ)
      +1407        return self.expression(exp.WithJournalTableProperty, this=self._parse_table_parts())
      +1408
      +1409    def _parse_log(self, no: bool = False) -> exp.LogProperty:
      +1410        return self.expression(exp.LogProperty, no=no)
      +1411
      +1412    def _parse_journal(self, **kwargs) -> exp.JournalProperty:
      +1413        return self.expression(exp.JournalProperty, **kwargs)
      +1414
      +1415    def _parse_checksum(self) -> exp.ChecksumProperty:
      +1416        self._match(TokenType.EQ)
      +1417
      +1418        on = None
      +1419        if self._match(TokenType.ON):
      +1420            on = True
      +1421        elif self._match_text_seq("OFF"):
      +1422            on = False
      +1423
      +1424        return self.expression(exp.ChecksumProperty, on=on, default=self._match(TokenType.DEFAULT))
       1425
      -1426        return self.expression(
      -1427            exp.ChecksumProperty,
      -1428            on=on,
      -1429            default=default,
      -1430        )
      -1431
      -1432    def _parse_cluster(self) -> t.Optional[exp.Expression]:
      -1433        if not self._match_text_seq("BY"):
      -1434            self._retreat(self._index - 1)
      -1435            return None
      -1436        return self.expression(
      -1437            exp.Cluster,
      -1438            expressions=self._parse_csv(self._parse_ordered),
      -1439        )
      -1440
      -1441    def _parse_freespace(self) -> exp.Expression:
      -1442        self._match(TokenType.EQ)
      -1443        return self.expression(
      -1444            exp.FreespaceProperty, this=self._parse_number(), percent=self._match(TokenType.PERCENT)
      -1445        )
      -1446
      -1447    def _parse_mergeblockratio(self, no: bool = False, default: bool = False) -> exp.Expression:
      -1448        if self._match(TokenType.EQ):
      -1449            return self.expression(
      -1450                exp.MergeBlockRatioProperty,
      -1451                this=self._parse_number(),
      -1452                percent=self._match(TokenType.PERCENT),
      -1453            )
      -1454        return self.expression(
      -1455            exp.MergeBlockRatioProperty,
      -1456            no=no,
      -1457            default=default,
      -1458        )
      +1426    def _parse_cluster(self) -> t.Optional[exp.Cluster]:
      +1427        if not self._match_text_seq("BY"):
      +1428            self._retreat(self._index - 1)
      +1429            return None
      +1430
      +1431        return self.expression(exp.Cluster, expressions=self._parse_csv(self._parse_ordered))
      +1432
      +1433    def _parse_freespace(self) -> exp.FreespaceProperty:
      +1434        self._match(TokenType.EQ)
      +1435        return self.expression(
      +1436            exp.FreespaceProperty, this=self._parse_number(), percent=self._match(TokenType.PERCENT)
      +1437        )
      +1438
      +1439    def _parse_mergeblockratio(
      +1440        self, no: bool = False, default: bool = False
      +1441    ) -> exp.MergeBlockRatioProperty:
      +1442        if self._match(TokenType.EQ):
      +1443            return self.expression(
      +1444                exp.MergeBlockRatioProperty,
      +1445                this=self._parse_number(),
      +1446                percent=self._match(TokenType.PERCENT),
      +1447            )
      +1448
      +1449        return self.expression(exp.MergeBlockRatioProperty, no=no, default=default)
      +1450
      +1451    def _parse_datablocksize(
      +1452        self,
      +1453        default: t.Optional[bool] = None,
      +1454        minimum: t.Optional[bool] = None,
      +1455        maximum: t.Optional[bool] = None,
      +1456    ) -> exp.DataBlocksizeProperty:
      +1457        self._match(TokenType.EQ)
      +1458        size = self._parse_number()
       1459
      -1460    def _parse_datablocksize(
      -1461        self,
      -1462        default: t.Optional[bool] = None,
      -1463        minimum: t.Optional[bool] = None,
      -1464        maximum: t.Optional[bool] = None,
      -1465    ) -> exp.Expression:
      -1466        self._match(TokenType.EQ)
      -1467        size = self._parse_number()
      -1468        units = None
      -1469        if self._match_texts(("BYTES", "KBYTES", "KILOBYTES")):
      -1470            units = self._prev.text
      -1471        return self.expression(
      -1472            exp.DataBlocksizeProperty,
      -1473            size=size,
      -1474            units=units,
      -1475            default=default,
      -1476            minimum=minimum,
      -1477            maximum=maximum,
      -1478        )
      +1460        units = None
      +1461        if self._match_texts(("BYTES", "KBYTES", "KILOBYTES")):
      +1462            units = self._prev.text
      +1463
      +1464        return self.expression(
      +1465            exp.DataBlocksizeProperty,
      +1466            size=size,
      +1467            units=units,
      +1468            default=default,
      +1469            minimum=minimum,
      +1470            maximum=maximum,
      +1471        )
      +1472
      +1473    def _parse_blockcompression(self) -> exp.BlockCompressionProperty:
      +1474        self._match(TokenType.EQ)
      +1475        always = self._match_text_seq("ALWAYS")
      +1476        manual = self._match_text_seq("MANUAL")
      +1477        never = self._match_text_seq("NEVER")
      +1478        default = self._match_text_seq("DEFAULT")
       1479
      -1480    def _parse_blockcompression(self) -> exp.Expression:
      -1481        self._match(TokenType.EQ)
      -1482        always = self._match_text_seq("ALWAYS")
      -1483        manual = self._match_text_seq("MANUAL")
      -1484        never = self._match_text_seq("NEVER")
      -1485        default = self._match_text_seq("DEFAULT")
      -1486        autotemp = None
      -1487        if self._match_text_seq("AUTOTEMP"):
      -1488            autotemp = self._parse_schema()
      -1489
      -1490        return self.expression(
      -1491            exp.BlockCompressionProperty,
      -1492            always=always,
      -1493            manual=manual,
      -1494            never=never,
      -1495            default=default,
      -1496            autotemp=autotemp,
      -1497        )
      -1498
      -1499    def _parse_withisolatedloading(self) -> exp.Expression:
      -1500        no = self._match_text_seq("NO")
      -1501        concurrent = self._match_text_seq("CONCURRENT")
      -1502        self._match_text_seq("ISOLATED", "LOADING")
      -1503        for_all = self._match_text_seq("FOR", "ALL")
      -1504        for_insert = self._match_text_seq("FOR", "INSERT")
      -1505        for_none = self._match_text_seq("FOR", "NONE")
      -1506        return self.expression(
      -1507            exp.IsolatedLoadingProperty,
      -1508            no=no,
      -1509            concurrent=concurrent,
      -1510            for_all=for_all,
      -1511            for_insert=for_insert,
      -1512            for_none=for_none,
      -1513        )
      -1514
      -1515    def _parse_locking(self) -> exp.Expression:
      -1516        if self._match(TokenType.TABLE):
      -1517            kind = "TABLE"
      -1518        elif self._match(TokenType.VIEW):
      -1519            kind = "VIEW"
      -1520        elif self._match(TokenType.ROW):
      -1521            kind = "ROW"
      -1522        elif self._match_text_seq("DATABASE"):
      -1523            kind = "DATABASE"
      -1524        else:
      -1525            kind = None
      -1526
      -1527        if kind in ("DATABASE", "TABLE", "VIEW"):
      -1528            this = self._parse_table_parts()
      -1529        else:
      -1530            this = None
      -1531
      -1532        if self._match(TokenType.FOR):
      -1533            for_or_in = "FOR"
      -1534        elif self._match(TokenType.IN):
      -1535            for_or_in = "IN"
      -1536        else:
      -1537            for_or_in = None
      -1538
      -1539        if self._match_text_seq("ACCESS"):
      -1540            lock_type = "ACCESS"
      -1541        elif self._match_texts(("EXCL", "EXCLUSIVE")):
      -1542            lock_type = "EXCLUSIVE"
      -1543        elif self._match_text_seq("SHARE"):
      -1544            lock_type = "SHARE"
      -1545        elif self._match_text_seq("READ"):
      -1546            lock_type = "READ"
      -1547        elif self._match_text_seq("WRITE"):
      -1548            lock_type = "WRITE"
      -1549        elif self._match_text_seq("CHECKSUM"):
      -1550            lock_type = "CHECKSUM"
      -1551        else:
      -1552            lock_type = None
      -1553
      -1554        override = self._match_text_seq("OVERRIDE")
      -1555
      -1556        return self.expression(
      -1557            exp.LockingProperty,
      -1558            this=this,
      -1559            kind=kind,
      -1560            for_or_in=for_or_in,
      -1561            lock_type=lock_type,
      -1562            override=override,
      -1563        )
      -1564
      -1565    def _parse_partition_by(self) -> t.List[t.Optional[exp.Expression]]:
      -1566        if self._match(TokenType.PARTITION_BY):
      -1567            return self._parse_csv(self._parse_conjunction)
      -1568        return []
      -1569
      -1570    def _parse_partitioned_by(self) -> exp.Expression:
      -1571        self._match(TokenType.EQ)
      -1572        return self.expression(
      -1573            exp.PartitionedByProperty,
      -1574            this=self._parse_schema() or self._parse_bracket(self._parse_field()),
      -1575        )
      -1576
      -1577    def _parse_withdata(self, no: bool = False) -> exp.Expression:
      -1578        if self._match_text_seq("AND", "STATISTICS"):
      -1579            statistics = True
      -1580        elif self._match_text_seq("AND", "NO", "STATISTICS"):
      -1581            statistics = False
      -1582        else:
      -1583            statistics = None
      -1584
      -1585        return self.expression(exp.WithDataProperty, no=no, statistics=statistics)
      -1586
      -1587    def _parse_no_property(self) -> t.Optional[exp.Property]:
      -1588        if self._match_text_seq("PRIMARY", "INDEX"):
      -1589            return exp.NoPrimaryIndexProperty()
      -1590        return None
      -1591
      -1592    def _parse_on_property(self) -> t.Optional[exp.Property]:
      -1593        if self._match_text_seq("COMMIT", "PRESERVE", "ROWS"):
      -1594            return exp.OnCommitProperty()
      -1595        elif self._match_text_seq("COMMIT", "DELETE", "ROWS"):
      -1596            return exp.OnCommitProperty(delete=True)
      -1597        return None
      +1480        autotemp = None
      +1481        if self._match_text_seq("AUTOTEMP"):
      +1482            autotemp = self._parse_schema()
      +1483
      +1484        return self.expression(
      +1485            exp.BlockCompressionProperty,
      +1486            always=always,
      +1487            manual=manual,
      +1488            never=never,
      +1489            default=default,
      +1490            autotemp=autotemp,
      +1491        )
      +1492
      +1493    def _parse_withisolatedloading(self) -> exp.IsolatedLoadingProperty:
      +1494        no = self._match_text_seq("NO")
      +1495        concurrent = self._match_text_seq("CONCURRENT")
      +1496        self._match_text_seq("ISOLATED", "LOADING")
      +1497        for_all = self._match_text_seq("FOR", "ALL")
      +1498        for_insert = self._match_text_seq("FOR", "INSERT")
      +1499        for_none = self._match_text_seq("FOR", "NONE")
      +1500        return self.expression(
      +1501            exp.IsolatedLoadingProperty,
      +1502            no=no,
      +1503            concurrent=concurrent,
      +1504            for_all=for_all,
      +1505            for_insert=for_insert,
      +1506            for_none=for_none,
      +1507        )
      +1508
      +1509    def _parse_locking(self) -> exp.LockingProperty:
      +1510        if self._match(TokenType.TABLE):
      +1511            kind = "TABLE"
      +1512        elif self._match(TokenType.VIEW):
      +1513            kind = "VIEW"
      +1514        elif self._match(TokenType.ROW):
      +1515            kind = "ROW"
      +1516        elif self._match_text_seq("DATABASE"):
      +1517            kind = "DATABASE"
      +1518        else:
      +1519            kind = None
      +1520
      +1521        if kind in ("DATABASE", "TABLE", "VIEW"):
      +1522            this = self._parse_table_parts()
      +1523        else:
      +1524            this = None
      +1525
      +1526        if self._match(TokenType.FOR):
      +1527            for_or_in = "FOR"
      +1528        elif self._match(TokenType.IN):
      +1529            for_or_in = "IN"
      +1530        else:
      +1531            for_or_in = None
      +1532
      +1533        if self._match_text_seq("ACCESS"):
      +1534            lock_type = "ACCESS"
      +1535        elif self._match_texts(("EXCL", "EXCLUSIVE")):
      +1536            lock_type = "EXCLUSIVE"
      +1537        elif self._match_text_seq("SHARE"):
      +1538            lock_type = "SHARE"
      +1539        elif self._match_text_seq("READ"):
      +1540            lock_type = "READ"
      +1541        elif self._match_text_seq("WRITE"):
      +1542            lock_type = "WRITE"
      +1543        elif self._match_text_seq("CHECKSUM"):
      +1544            lock_type = "CHECKSUM"
      +1545        else:
      +1546            lock_type = None
      +1547
      +1548        override = self._match_text_seq("OVERRIDE")
      +1549
      +1550        return self.expression(
      +1551            exp.LockingProperty,
      +1552            this=this,
      +1553            kind=kind,
      +1554            for_or_in=for_or_in,
      +1555            lock_type=lock_type,
      +1556            override=override,
      +1557        )
      +1558
      +1559    def _parse_partition_by(self) -> t.List[t.Optional[exp.Expression]]:
      +1560        if self._match(TokenType.PARTITION_BY):
      +1561            return self._parse_csv(self._parse_conjunction)
      +1562        return []
      +1563
      +1564    def _parse_partitioned_by(self) -> exp.PartitionedByProperty:
      +1565        self._match(TokenType.EQ)
      +1566        return self.expression(
      +1567            exp.PartitionedByProperty,
      +1568            this=self._parse_schema() or self._parse_bracket(self._parse_field()),
      +1569        )
      +1570
      +1571    def _parse_withdata(self, no: bool = False) -> exp.WithDataProperty:
      +1572        if self._match_text_seq("AND", "STATISTICS"):
      +1573            statistics = True
      +1574        elif self._match_text_seq("AND", "NO", "STATISTICS"):
      +1575            statistics = False
      +1576        else:
      +1577            statistics = None
      +1578
      +1579        return self.expression(exp.WithDataProperty, no=no, statistics=statistics)
      +1580
      +1581    def _parse_no_property(self) -> t.Optional[exp.NoPrimaryIndexProperty]:
      +1582        if self._match_text_seq("PRIMARY", "INDEX"):
      +1583            return exp.NoPrimaryIndexProperty()
      +1584        return None
      +1585
      +1586    def _parse_on_property(self) -> t.Optional[exp.Expression]:
      +1587        if self._match_text_seq("COMMIT", "PRESERVE", "ROWS"):
      +1588            return exp.OnCommitProperty()
      +1589        elif self._match_text_seq("COMMIT", "DELETE", "ROWS"):
      +1590            return exp.OnCommitProperty(delete=True)
      +1591        return None
      +1592
      +1593    def _parse_distkey(self) -> exp.DistKeyProperty:
      +1594        return self.expression(exp.DistKeyProperty, this=self._parse_wrapped(self._parse_id_var))
      +1595
      +1596    def _parse_create_like(self) -> t.Optional[exp.LikeProperty]:
      +1597        table = self._parse_table(schema=True)
       1598
      -1599    def _parse_distkey(self) -> exp.Expression:
      -1600        return self.expression(exp.DistKeyProperty, this=self._parse_wrapped(self._parse_id_var))
      -1601
      -1602    def _parse_create_like(self) -> t.Optional[exp.Expression]:
      -1603        table = self._parse_table(schema=True)
      -1604        options = []
      -1605        while self._match_texts(("INCLUDING", "EXCLUDING")):
      -1606            this = self._prev.text.upper()
      -1607            id_var = self._parse_id_var()
      -1608
      -1609            if not id_var:
      -1610                return None
      -1611
      -1612            options.append(
      -1613                self.expression(
      -1614                    exp.Property,
      -1615                    this=this,
      -1616                    value=exp.Var(this=id_var.this.upper()),
      -1617                )
      -1618            )
      -1619        return self.expression(exp.LikeProperty, this=table, expressions=options)
      -1620
      -1621    def _parse_sortkey(self, compound: bool = False) -> exp.Expression:
      -1622        return self.expression(
      -1623            exp.SortKeyProperty, this=self._parse_wrapped_csv(self._parse_id_var), compound=compound
      -1624        )
      -1625
      -1626    def _parse_character_set(self, default: bool = False) -> exp.Expression:
      -1627        self._match(TokenType.EQ)
      -1628        return self.expression(
      -1629            exp.CharacterSetProperty, this=self._parse_var_or_string(), default=default
      -1630        )
      -1631
      -1632    def _parse_returns(self) -> exp.Expression:
      -1633        value: t.Optional[exp.Expression]
      -1634        is_table = self._match(TokenType.TABLE)
      -1635
      -1636        if is_table:
      -1637            if self._match(TokenType.LT):
      -1638                value = self.expression(
      -1639                    exp.Schema,
      -1640                    this="TABLE",
      -1641                    expressions=self._parse_csv(self._parse_struct_types),
      -1642                )
      -1643                if not self._match(TokenType.GT):
      -1644                    self.raise_error("Expecting >")
      -1645            else:
      -1646                value = self._parse_schema(exp.Var(this="TABLE"))
      -1647        else:
      -1648            value = self._parse_types()
      -1649
      -1650        return self.expression(exp.ReturnsProperty, this=value, is_table=is_table)
      -1651
      -1652    def _parse_describe(self) -> exp.Expression:
      -1653        kind = self._match_set(self.CREATABLES) and self._prev.text
      -1654        this = self._parse_table()
      -1655
      -1656        return self.expression(exp.Describe, this=this, kind=kind)
      -1657
      -1658    def _parse_insert(self) -> exp.Expression:
      -1659        overwrite = self._match(TokenType.OVERWRITE)
      -1660        local = self._match_text_seq("LOCAL")
      -1661        alternative = None
      -1662
      -1663        if self._match_text_seq("DIRECTORY"):
      -1664            this: t.Optional[exp.Expression] = self.expression(
      -1665                exp.Directory,
      -1666                this=self._parse_var_or_string(),
      -1667                local=local,
      -1668                row_format=self._parse_row_format(match_row=True),
      -1669            )
      -1670        else:
      -1671            if self._match(TokenType.OR):
      -1672                alternative = self._match_texts(self.INSERT_ALTERNATIVES) and self._prev.text
      -1673
      -1674            self._match(TokenType.INTO)
      -1675            self._match(TokenType.TABLE)
      -1676            this = self._parse_table(schema=True)
      -1677
      -1678        return self.expression(
      -1679            exp.Insert,
      -1680            this=this,
      -1681            exists=self._parse_exists(),
      -1682            partition=self._parse_partition(),
      -1683            expression=self._parse_ddl_select(),
      -1684            conflict=self._parse_on_conflict(),
      -1685            returning=self._parse_returning(),
      -1686            overwrite=overwrite,
      -1687            alternative=alternative,
      -1688        )
      -1689
      -1690    def _parse_on_conflict(self) -> t.Optional[exp.Expression]:
      -1691        conflict = self._match_text_seq("ON", "CONFLICT")
      -1692        duplicate = self._match_text_seq("ON", "DUPLICATE", "KEY")
      -1693
      -1694        if not (conflict or duplicate):
      -1695            return None
      -1696
      -1697        nothing = None
      -1698        expressions = None
      -1699        key = None
      -1700        constraint = None
      -1701
      -1702        if conflict:
      -1703            if self._match_text_seq("ON", "CONSTRAINT"):
      -1704                constraint = self._parse_id_var()
      -1705            else:
      -1706                key = self._parse_csv(self._parse_value)
      -1707
      -1708        self._match_text_seq("DO")
      -1709        if self._match_text_seq("NOTHING"):
      -1710            nothing = True
      -1711        else:
      -1712            self._match(TokenType.UPDATE)
      -1713            expressions = self._match(TokenType.SET) and self._parse_csv(self._parse_equality)
      +1599        options = []
      +1600        while self._match_texts(("INCLUDING", "EXCLUDING")):
      +1601            this = self._prev.text.upper()
      +1602
      +1603            id_var = self._parse_id_var()
      +1604            if not id_var:
      +1605                return None
      +1606
      +1607            options.append(
      +1608                self.expression(exp.Property, this=this, value=exp.var(id_var.this.upper()))
      +1609            )
      +1610
      +1611        return self.expression(exp.LikeProperty, this=table, expressions=options)
      +1612
      +1613    def _parse_sortkey(self, compound: bool = False) -> exp.SortKeyProperty:
      +1614        return self.expression(
      +1615            exp.SortKeyProperty, this=self._parse_wrapped_id_vars(), compound=compound
      +1616        )
      +1617
      +1618    def _parse_character_set(self, default: bool = False) -> exp.CharacterSetProperty:
      +1619        self._match(TokenType.EQ)
      +1620        return self.expression(
      +1621            exp.CharacterSetProperty, this=self._parse_var_or_string(), default=default
      +1622        )
      +1623
      +1624    def _parse_returns(self) -> exp.ReturnsProperty:
      +1625        value: t.Optional[exp.Expression]
      +1626        is_table = self._match(TokenType.TABLE)
      +1627
      +1628        if is_table:
      +1629            if self._match(TokenType.LT):
      +1630                value = self.expression(
      +1631                    exp.Schema,
      +1632                    this="TABLE",
      +1633                    expressions=self._parse_csv(self._parse_struct_types),
      +1634                )
      +1635                if not self._match(TokenType.GT):
      +1636                    self.raise_error("Expecting >")
      +1637            else:
      +1638                value = self._parse_schema(exp.var("TABLE"))
      +1639        else:
      +1640            value = self._parse_types()
      +1641
      +1642        return self.expression(exp.ReturnsProperty, this=value, is_table=is_table)
      +1643
      +1644    def _parse_describe(self) -> exp.Describe:
      +1645        kind = self._match_set(self.CREATABLES) and self._prev.text
      +1646        this = self._parse_table()
      +1647        return self.expression(exp.Describe, this=this, kind=kind)
      +1648
      +1649    def _parse_insert(self) -> exp.Insert:
      +1650        overwrite = self._match(TokenType.OVERWRITE)
      +1651        local = self._match_text_seq("LOCAL")
      +1652        alternative = None
      +1653
      +1654        if self._match_text_seq("DIRECTORY"):
      +1655            this: t.Optional[exp.Expression] = self.expression(
      +1656                exp.Directory,
      +1657                this=self._parse_var_or_string(),
      +1658                local=local,
      +1659                row_format=self._parse_row_format(match_row=True),
      +1660            )
      +1661        else:
      +1662            if self._match(TokenType.OR):
      +1663                alternative = self._match_texts(self.INSERT_ALTERNATIVES) and self._prev.text
      +1664
      +1665            self._match(TokenType.INTO)
      +1666            self._match(TokenType.TABLE)
      +1667            this = self._parse_table(schema=True)
      +1668
      +1669        return self.expression(
      +1670            exp.Insert,
      +1671            this=this,
      +1672            exists=self._parse_exists(),
      +1673            partition=self._parse_partition(),
      +1674            expression=self._parse_ddl_select(),
      +1675            conflict=self._parse_on_conflict(),
      +1676            returning=self._parse_returning(),
      +1677            overwrite=overwrite,
      +1678            alternative=alternative,
      +1679        )
      +1680
      +1681    def _parse_on_conflict(self) -> t.Optional[exp.OnConflict]:
      +1682        conflict = self._match_text_seq("ON", "CONFLICT")
      +1683        duplicate = self._match_text_seq("ON", "DUPLICATE", "KEY")
      +1684
      +1685        if not conflict and not duplicate:
      +1686            return None
      +1687
      +1688        nothing = None
      +1689        expressions = None
      +1690        key = None
      +1691        constraint = None
      +1692
      +1693        if conflict:
      +1694            if self._match_text_seq("ON", "CONSTRAINT"):
      +1695                constraint = self._parse_id_var()
      +1696            else:
      +1697                key = self._parse_csv(self._parse_value)
      +1698
      +1699        self._match_text_seq("DO")
      +1700        if self._match_text_seq("NOTHING"):
      +1701            nothing = True
      +1702        else:
      +1703            self._match(TokenType.UPDATE)
      +1704            expressions = self._match(TokenType.SET) and self._parse_csv(self._parse_equality)
      +1705
      +1706        return self.expression(
      +1707            exp.OnConflict,
      +1708            duplicate=duplicate,
      +1709            expressions=expressions,
      +1710            nothing=nothing,
      +1711            key=key,
      +1712            constraint=constraint,
      +1713        )
       1714
      -1715        return self.expression(
      -1716            exp.OnConflict,
      -1717            duplicate=duplicate,
      -1718            expressions=expressions,
      -1719            nothing=nothing,
      -1720            key=key,
      -1721            constraint=constraint,
      -1722        )
      -1723
      -1724    def _parse_returning(self) -> t.Optional[exp.Expression]:
      -1725        if not self._match(TokenType.RETURNING):
      -1726            return None
      -1727
      -1728        return self.expression(exp.Returning, expressions=self._parse_csv(self._parse_column))
      -1729
      -1730    def _parse_row(self) -> t.Optional[exp.Expression]:
      -1731        if not self._match(TokenType.FORMAT):
      -1732            return None
      -1733        return self._parse_row_format()
      +1715    def _parse_returning(self) -> t.Optional[exp.Returning]:
      +1716        if not self._match(TokenType.RETURNING):
      +1717            return None
      +1718
      +1719        return self.expression(exp.Returning, expressions=self._parse_csv(self._parse_column))
      +1720
      +1721    def _parse_row(self) -> t.Optional[exp.RowFormatSerdeProperty | exp.RowFormatDelimitedProperty]:
      +1722        if not self._match(TokenType.FORMAT):
      +1723            return None
      +1724        return self._parse_row_format()
      +1725
      +1726    def _parse_row_format(
      +1727        self, match_row: bool = False
      +1728    ) -> t.Optional[exp.RowFormatSerdeProperty | exp.RowFormatDelimitedProperty]:
      +1729        if match_row and not self._match_pair(TokenType.ROW, TokenType.FORMAT):
      +1730            return None
      +1731
      +1732        if self._match_text_seq("SERDE"):
      +1733            return self.expression(exp.RowFormatSerdeProperty, this=self._parse_string())
       1734
      -1735    def _parse_row_format(self, match_row: bool = False) -> t.Optional[exp.Expression]:
      -1736        if match_row and not self._match_pair(TokenType.ROW, TokenType.FORMAT):
      -1737            return None
      +1735        self._match_text_seq("DELIMITED")
      +1736
      +1737        kwargs = {}
       1738
      -1739        if self._match_text_seq("SERDE"):
      -1740            return self.expression(exp.RowFormatSerdeProperty, this=self._parse_string())
      -1741
      -1742        self._match_text_seq("DELIMITED")
      -1743
      -1744        kwargs = {}
      -1745
      -1746        if self._match_text_seq("FIELDS", "TERMINATED", "BY"):
      -1747            kwargs["fields"] = self._parse_string()
      -1748            if self._match_text_seq("ESCAPED", "BY"):
      -1749                kwargs["escaped"] = self._parse_string()
      -1750        if self._match_text_seq("COLLECTION", "ITEMS", "TERMINATED", "BY"):
      -1751            kwargs["collection_items"] = self._parse_string()
      -1752        if self._match_text_seq("MAP", "KEYS", "TERMINATED", "BY"):
      -1753            kwargs["map_keys"] = self._parse_string()
      -1754        if self._match_text_seq("LINES", "TERMINATED", "BY"):
      -1755            kwargs["lines"] = self._parse_string()
      -1756        if self._match_text_seq("NULL", "DEFINED", "AS"):
      -1757            kwargs["null"] = self._parse_string()
      -1758
      -1759        return self.expression(exp.RowFormatDelimitedProperty, **kwargs)  # type: ignore
      -1760
      -1761    def _parse_load(self) -> exp.Expression:
      -1762        if self._match_text_seq("DATA"):
      -1763            local = self._match_text_seq("LOCAL")
      -1764            self._match_text_seq("INPATH")
      -1765            inpath = self._parse_string()
      -1766            overwrite = self._match(TokenType.OVERWRITE)
      -1767            self._match_pair(TokenType.INTO, TokenType.TABLE)
      -1768
      -1769            return self.expression(
      -1770                exp.LoadData,
      -1771                this=self._parse_table(schema=True),
      -1772                local=local,
      -1773                overwrite=overwrite,
      -1774                inpath=inpath,
      -1775                partition=self._parse_partition(),
      -1776                input_format=self._match_text_seq("INPUTFORMAT") and self._parse_string(),
      -1777                serde=self._match_text_seq("SERDE") and self._parse_string(),
      -1778            )
      -1779        return self._parse_as_command(self._prev)
      -1780
      -1781    def _parse_delete(self) -> exp.Expression:
      -1782        self._match(TokenType.FROM)
      -1783
      -1784        return self.expression(
      -1785            exp.Delete,
      -1786            this=self._parse_table(),
      -1787            using=self._parse_csv(lambda: self._match(TokenType.USING) and self._parse_table()),
      -1788            where=self._parse_where(),
      -1789            returning=self._parse_returning(),
      -1790        )
      -1791
      -1792    def _parse_update(self) -> exp.Expression:
      -1793        return self.expression(
      -1794            exp.Update,
      -1795            **{  # type: ignore
      -1796                "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
      -1797                "expressions": self._match(TokenType.SET) and self._parse_csv(self._parse_equality),
      -1798                "from": self._parse_from(modifiers=True),
      -1799                "where": self._parse_where(),
      -1800                "returning": self._parse_returning(),
      -1801            },
      -1802        )
      -1803
      -1804    def _parse_uncache(self) -> exp.Expression:
      -1805        if not self._match(TokenType.TABLE):
      -1806            self.raise_error("Expecting TABLE after UNCACHE")
      -1807
      -1808        return self.expression(
      -1809            exp.Uncache,
      -1810            exists=self._parse_exists(),
      -1811            this=self._parse_table(schema=True),
      -1812        )
      -1813
      -1814    def _parse_cache(self) -> exp.Expression:
      -1815        lazy = self._match_text_seq("LAZY")
      -1816        self._match(TokenType.TABLE)
      -1817        table = self._parse_table(schema=True)
      -1818        options = []
      -1819
      -1820        if self._match_text_seq("OPTIONS"):
      -1821            self._match_l_paren()
      -1822            k = self._parse_string()
      -1823            self._match(TokenType.EQ)
      -1824            v = self._parse_string()
      -1825            options = [k, v]
      -1826            self._match_r_paren()
      +1739        if self._match_text_seq("FIELDS", "TERMINATED", "BY"):
      +1740            kwargs["fields"] = self._parse_string()
      +1741            if self._match_text_seq("ESCAPED", "BY"):
      +1742                kwargs["escaped"] = self._parse_string()
      +1743        if self._match_text_seq("COLLECTION", "ITEMS", "TERMINATED", "BY"):
      +1744            kwargs["collection_items"] = self._parse_string()
      +1745        if self._match_text_seq("MAP", "KEYS", "TERMINATED", "BY"):
      +1746            kwargs["map_keys"] = self._parse_string()
      +1747        if self._match_text_seq("LINES", "TERMINATED", "BY"):
      +1748            kwargs["lines"] = self._parse_string()
      +1749        if self._match_text_seq("NULL", "DEFINED", "AS"):
      +1750            kwargs["null"] = self._parse_string()
      +1751
      +1752        return self.expression(exp.RowFormatDelimitedProperty, **kwargs)  # type: ignore
      +1753
      +1754    def _parse_load(self) -> exp.LoadData | exp.Command:
      +1755        if self._match_text_seq("DATA"):
      +1756            local = self._match_text_seq("LOCAL")
      +1757            self._match_text_seq("INPATH")
      +1758            inpath = self._parse_string()
      +1759            overwrite = self._match(TokenType.OVERWRITE)
      +1760            self._match_pair(TokenType.INTO, TokenType.TABLE)
      +1761
      +1762            return self.expression(
      +1763                exp.LoadData,
      +1764                this=self._parse_table(schema=True),
      +1765                local=local,
      +1766                overwrite=overwrite,
      +1767                inpath=inpath,
      +1768                partition=self._parse_partition(),
      +1769                input_format=self._match_text_seq("INPUTFORMAT") and self._parse_string(),
      +1770                serde=self._match_text_seq("SERDE") and self._parse_string(),
      +1771            )
      +1772        return self._parse_as_command(self._prev)
      +1773
      +1774    def _parse_delete(self) -> exp.Delete:
      +1775        self._match(TokenType.FROM)
      +1776
      +1777        return self.expression(
      +1778            exp.Delete,
      +1779            this=self._parse_table(),
      +1780            using=self._parse_csv(lambda: self._match(TokenType.USING) and self._parse_table()),
      +1781            where=self._parse_where(),
      +1782            returning=self._parse_returning(),
      +1783        )
      +1784
      +1785    def _parse_update(self) -> exp.Update:
      +1786        return self.expression(
      +1787            exp.Update,
      +1788            **{  # type: ignore
      +1789                "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
      +1790                "expressions": self._match(TokenType.SET) and self._parse_csv(self._parse_equality),
      +1791                "from": self._parse_from(modifiers=True),
      +1792                "where": self._parse_where(),
      +1793                "returning": self._parse_returning(),
      +1794            },
      +1795        )
      +1796
      +1797    def _parse_uncache(self) -> exp.Uncache:
      +1798        if not self._match(TokenType.TABLE):
      +1799            self.raise_error("Expecting TABLE after UNCACHE")
      +1800
      +1801        return self.expression(
      +1802            exp.Uncache, exists=self._parse_exists(), this=self._parse_table(schema=True)
      +1803        )
      +1804
      +1805    def _parse_cache(self) -> exp.Cache:
      +1806        lazy = self._match_text_seq("LAZY")
      +1807        self._match(TokenType.TABLE)
      +1808        table = self._parse_table(schema=True)
      +1809
      +1810        options = []
      +1811        if self._match_text_seq("OPTIONS"):
      +1812            self._match_l_paren()
      +1813            k = self._parse_string()
      +1814            self._match(TokenType.EQ)
      +1815            v = self._parse_string()
      +1816            options = [k, v]
      +1817            self._match_r_paren()
      +1818
      +1819        self._match(TokenType.ALIAS)
      +1820        return self.expression(
      +1821            exp.Cache,
      +1822            this=table,
      +1823            lazy=lazy,
      +1824            options=options,
      +1825            expression=self._parse_select(nested=True),
      +1826        )
       1827
      -1828        self._match(TokenType.ALIAS)
      -1829        return self.expression(
      -1830            exp.Cache,
      -1831            this=table,
      -1832            lazy=lazy,
      -1833            options=options,
      -1834            expression=self._parse_select(nested=True),
      -1835        )
      -1836
      -1837    def _parse_partition(self) -> t.Optional[exp.Expression]:
      -1838        if not self._match(TokenType.PARTITION):
      -1839            return None
      -1840
      -1841        return self.expression(
      -1842            exp.Partition, expressions=self._parse_wrapped_csv(self._parse_conjunction)
      -1843        )
      -1844
      -1845    def _parse_value(self) -> exp.Expression:
      -1846        if self._match(TokenType.L_PAREN):
      -1847            expressions = self._parse_csv(self._parse_conjunction)
      -1848            self._match_r_paren()
      -1849            return self.expression(exp.Tuple, expressions=expressions)
      -1850
      -1851        # In presto we can have VALUES 1, 2 which results in 1 column & 2 rows.
      -1852        # Source: https://prestodb.io/docs/current/sql/values.html
      -1853        return self.expression(exp.Tuple, expressions=[self._parse_conjunction()])
      -1854
      -1855    def _parse_select(
      -1856        self, nested: bool = False, table: bool = False, parse_subquery_alias: bool = True
      -1857    ) -> t.Optional[exp.Expression]:
      -1858        cte = self._parse_with()
      -1859        if cte:
      -1860            this = self._parse_statement()
      -1861
      -1862            if not this:
      -1863                self.raise_error("Failed to parse any statement following CTE")
      -1864                return cte
      -1865
      -1866            if "with" in this.arg_types:
      -1867                this.set("with", cte)
      -1868            else:
      -1869                self.raise_error(f"{this.key} does not support CTE")
      -1870                this = cte
      -1871        elif self._match(TokenType.SELECT):
      -1872            comments = self._prev_comments
      -1873
      -1874            hint = self._parse_hint()
      -1875            all_ = self._match(TokenType.ALL)
      -1876            distinct = self._match(TokenType.DISTINCT)
      -1877
      -1878            kind = (
      -1879                self._match(TokenType.ALIAS)
      -1880                and self._match_texts(("STRUCT", "VALUE"))
      -1881                and self._prev.text
      -1882            )
      +1828    def _parse_partition(self) -> t.Optional[exp.Partition]:
      +1829        if not self._match(TokenType.PARTITION):
      +1830            return None
      +1831
      +1832        return self.expression(
      +1833            exp.Partition, expressions=self._parse_wrapped_csv(self._parse_conjunction)
      +1834        )
      +1835
      +1836    def _parse_value(self) -> exp.Tuple:
      +1837        if self._match(TokenType.L_PAREN):
      +1838            expressions = self._parse_csv(self._parse_conjunction)
      +1839            self._match_r_paren()
      +1840            return self.expression(exp.Tuple, expressions=expressions)
      +1841
      +1842        # In presto we can have VALUES 1, 2 which results in 1 column & 2 rows.
      +1843        # Source: https://prestodb.io/docs/current/sql/values.html
      +1844        return self.expression(exp.Tuple, expressions=[self._parse_conjunction()])
      +1845
      +1846    def _parse_select(
      +1847        self, nested: bool = False, table: bool = False, parse_subquery_alias: bool = True
      +1848    ) -> t.Optional[exp.Expression]:
      +1849        cte = self._parse_with()
      +1850        if cte:
      +1851            this = self._parse_statement()
      +1852
      +1853            if not this:
      +1854                self.raise_error("Failed to parse any statement following CTE")
      +1855                return cte
      +1856
      +1857            if "with" in this.arg_types:
      +1858                this.set("with", cte)
      +1859            else:
      +1860                self.raise_error(f"{this.key} does not support CTE")
      +1861                this = cte
      +1862        elif self._match(TokenType.SELECT):
      +1863            comments = self._prev_comments
      +1864
      +1865            hint = self._parse_hint()
      +1866            all_ = self._match(TokenType.ALL)
      +1867            distinct = self._match(TokenType.DISTINCT)
      +1868
      +1869            kind = (
      +1870                self._match(TokenType.ALIAS)
      +1871                and self._match_texts(("STRUCT", "VALUE"))
      +1872                and self._prev.text
      +1873            )
      +1874
      +1875            if distinct:
      +1876                distinct = self.expression(
      +1877                    exp.Distinct,
      +1878                    on=self._parse_value() if self._match(TokenType.ON) else None,
      +1879                )
      +1880
      +1881            if all_ and distinct:
      +1882                self.raise_error("Cannot specify both ALL and DISTINCT after SELECT")
       1883
      -1884            if distinct:
      -1885                distinct = self.expression(
      -1886                    exp.Distinct,
      -1887                    on=self._parse_value() if self._match(TokenType.ON) else None,
      -1888                )
      -1889
      -1890            if all_ and distinct:
      -1891                self.raise_error("Cannot specify both ALL and DISTINCT after SELECT")
      -1892
      -1893            limit = self._parse_limit(top=True)
      -1894            expressions = self._parse_csv(self._parse_expression)
      -1895
      -1896            this = self.expression(
      -1897                exp.Select,
      -1898                kind=kind,
      -1899                hint=hint,
      -1900                distinct=distinct,
      -1901                expressions=expressions,
      -1902                limit=limit,
      -1903            )
      -1904            this.comments = comments
      -1905
      -1906            into = self._parse_into()
      -1907            if into:
      -1908                this.set("into", into)
      -1909
      -1910            from_ = self._parse_from()
      -1911            if from_:
      -1912                this.set("from", from_)
      -1913
      -1914            this = self._parse_query_modifiers(this)
      -1915        elif (table or nested) and self._match(TokenType.L_PAREN):
      -1916            if self._match(TokenType.PIVOT):
      -1917                this = self._parse_simplified_pivot()
      -1918            elif self._match(TokenType.FROM):
      -1919                this = exp.select("*").from_(
      -1920                    t.cast(exp.From, self._parse_from(skip_from_token=True))
      -1921                )
      -1922            else:
      -1923                this = self._parse_table() if table else self._parse_select(nested=True)
      -1924                this = self._parse_set_operations(self._parse_query_modifiers(this))
      -1925
      -1926            self._match_r_paren()
      -1927
      -1928            # early return so that subquery unions aren't parsed again
      -1929            # SELECT * FROM (SELECT 1) UNION ALL SELECT 1
      -1930            # Union ALL should be a property of the top select node, not the subquery
      -1931            return self._parse_subquery(this, parse_alias=parse_subquery_alias)
      -1932        elif self._match(TokenType.VALUES):
      -1933            this = self.expression(
      -1934                exp.Values,
      -1935                expressions=self._parse_csv(self._parse_value),
      -1936                alias=self._parse_table_alias(),
      -1937            )
      -1938        else:
      -1939            this = None
      +1884            limit = self._parse_limit(top=True)
      +1885            expressions = self._parse_csv(self._parse_expression)
      +1886
      +1887            this = self.expression(
      +1888                exp.Select,
      +1889                kind=kind,
      +1890                hint=hint,
      +1891                distinct=distinct,
      +1892                expressions=expressions,
      +1893                limit=limit,
      +1894            )
      +1895            this.comments = comments
      +1896
      +1897            into = self._parse_into()
      +1898            if into:
      +1899                this.set("into", into)
      +1900
      +1901            from_ = self._parse_from()
      +1902            if from_:
      +1903                this.set("from", from_)
      +1904
      +1905            this = self._parse_query_modifiers(this)
      +1906        elif (table or nested) and self._match(TokenType.L_PAREN):
      +1907            if self._match(TokenType.PIVOT):
      +1908                this = self._parse_simplified_pivot()
      +1909            elif self._match(TokenType.FROM):
      +1910                this = exp.select("*").from_(
      +1911                    t.cast(exp.From, self._parse_from(skip_from_token=True))
      +1912                )
      +1913            else:
      +1914                this = self._parse_table() if table else self._parse_select(nested=True)
      +1915                this = self._parse_set_operations(self._parse_query_modifiers(this))
      +1916
      +1917            self._match_r_paren()
      +1918
      +1919            # early return so that subquery unions aren't parsed again
      +1920            # SELECT * FROM (SELECT 1) UNION ALL SELECT 1
      +1921            # Union ALL should be a property of the top select node, not the subquery
      +1922            return self._parse_subquery(this, parse_alias=parse_subquery_alias)
      +1923        elif self._match(TokenType.VALUES):
      +1924            this = self.expression(
      +1925                exp.Values,
      +1926                expressions=self._parse_csv(self._parse_value),
      +1927                alias=self._parse_table_alias(),
      +1928            )
      +1929        else:
      +1930            this = None
      +1931
      +1932        return self._parse_set_operations(this)
      +1933
      +1934    def _parse_with(self, skip_with_token: bool = False) -> t.Optional[exp.With]:
      +1935        if not skip_with_token and not self._match(TokenType.WITH):
      +1936            return None
      +1937
      +1938        comments = self._prev_comments
      +1939        recursive = self._match(TokenType.RECURSIVE)
       1940
      -1941        return self._parse_set_operations(this)
      -1942
      -1943    def _parse_with(self, skip_with_token: bool = False) -> t.Optional[exp.Expression]:
      -1944        if not skip_with_token and not self._match(TokenType.WITH):
      -1945            return None
      -1946
      -1947        comments = self._prev_comments
      -1948        recursive = self._match(TokenType.RECURSIVE)
      +1941        expressions = []
      +1942        while True:
      +1943            expressions.append(self._parse_cte())
      +1944
      +1945            if not self._match(TokenType.COMMA) and not self._match(TokenType.WITH):
      +1946                break
      +1947            else:
      +1948                self._match(TokenType.WITH)
       1949
      -1950        expressions = []
      -1951        while True:
      -1952            expressions.append(self._parse_cte())
      +1950        return self.expression(
      +1951            exp.With, comments=comments, expressions=expressions, recursive=recursive
      +1952        )
       1953
      -1954            if not self._match(TokenType.COMMA) and not self._match(TokenType.WITH):
      -1955                break
      -1956            else:
      -1957                self._match(TokenType.WITH)
      +1954    def _parse_cte(self) -> exp.CTE:
      +1955        alias = self._parse_table_alias()
      +1956        if not alias or not alias.this:
      +1957            self.raise_error("Expected CTE to have alias")
       1958
      -1959        return self.expression(
      -1960            exp.With, comments=comments, expressions=expressions, recursive=recursive
      -1961        )
      -1962
      -1963    def _parse_cte(self) -> exp.Expression:
      -1964        alias = self._parse_table_alias()
      -1965        if not alias or not alias.this:
      -1966            self.raise_error("Expected CTE to have alias")
      -1967
      -1968        self._match(TokenType.ALIAS)
      -1969
      -1970        return self.expression(
      -1971            exp.CTE,
      -1972            this=self._parse_wrapped(self._parse_statement),
      -1973            alias=alias,
      -1974        )
      -1975
      -1976    def _parse_table_alias(
      -1977        self, alias_tokens: t.Optional[t.Collection[TokenType]] = None
      -1978    ) -> t.Optional[exp.Expression]:
      -1979        any_token = self._match(TokenType.ALIAS)
      -1980        alias = (
      -1981            self._parse_id_var(any_token=any_token, tokens=alias_tokens or self.TABLE_ALIAS_TOKENS)
      -1982            or self._parse_string_as_identifier()
      -1983        )
      +1959        self._match(TokenType.ALIAS)
      +1960        return self.expression(
      +1961            exp.CTE, this=self._parse_wrapped(self._parse_statement), alias=alias
      +1962        )
      +1963
      +1964    def _parse_table_alias(
      +1965        self, alias_tokens: t.Optional[t.Collection[TokenType]] = None
      +1966    ) -> t.Optional[exp.TableAlias]:
      +1967        any_token = self._match(TokenType.ALIAS)
      +1968        alias = (
      +1969            self._parse_id_var(any_token=any_token, tokens=alias_tokens or self.TABLE_ALIAS_TOKENS)
      +1970            or self._parse_string_as_identifier()
      +1971        )
      +1972
      +1973        index = self._index
      +1974        if self._match(TokenType.L_PAREN):
      +1975            columns = self._parse_csv(self._parse_function_parameter)
      +1976            self._match_r_paren() if columns else self._retreat(index)
      +1977        else:
      +1978            columns = None
      +1979
      +1980        if not alias and not columns:
      +1981            return None
      +1982
      +1983        return self.expression(exp.TableAlias, this=alias, columns=columns)
       1984
      -1985        index = self._index
      -1986        if self._match(TokenType.L_PAREN):
      -1987            columns = self._parse_csv(self._parse_function_parameter)
      -1988            self._match_r_paren() if columns else self._retreat(index)
      -1989        else:
      -1990            columns = None
      -1991
      -1992        if not alias and not columns:
      -1993            return None
      -1994
      -1995        return self.expression(exp.TableAlias, this=alias, columns=columns)
      -1996
      -1997    def _parse_subquery(
      -1998        self, this: t.Optional[exp.Expression], parse_alias: bool = True
      -1999    ) -> t.Optional[exp.Expression]:
      -2000        if not this:
      -2001            return None
      -2002        return self.expression(
      -2003            exp.Subquery,
      -2004            this=this,
      -2005            pivots=self._parse_pivots(),
      -2006            alias=self._parse_table_alias() if parse_alias else None,
      -2007        )
      -2008
      -2009    def _parse_query_modifiers(
      -2010        self, this: t.Optional[exp.Expression]
      -2011    ) -> t.Optional[exp.Expression]:
      -2012        if isinstance(this, self.MODIFIABLES):
      -2013            for key, parser in self.QUERY_MODIFIER_PARSERS.items():
      -2014                expression = parser(self)
      -2015
      -2016                if expression:
      -2017                    this.set(key, expression)
      -2018        return this
      +1985    def _parse_subquery(
      +1986        self, this: t.Optional[exp.Expression], parse_alias: bool = True
      +1987    ) -> t.Optional[exp.Subquery]:
      +1988        if not this:
      +1989            return None
      +1990
      +1991        return self.expression(
      +1992            exp.Subquery,
      +1993            this=this,
      +1994            pivots=self._parse_pivots(),
      +1995            alias=self._parse_table_alias() if parse_alias else None,
      +1996        )
      +1997
      +1998    def _parse_query_modifiers(
      +1999        self, this: t.Optional[exp.Expression]
      +2000    ) -> t.Optional[exp.Expression]:
      +2001        if isinstance(this, self.MODIFIABLES):
      +2002            for key, parser in self.QUERY_MODIFIER_PARSERS.items():
      +2003                expression = parser(self)
      +2004
      +2005                if expression:
      +2006                    if key == "limit":
      +2007                        offset = expression.args.pop("offset", None)
      +2008                        if offset:
      +2009                            this.set("offset", exp.Offset(expression=offset))
      +2010                    this.set(key, expression)
      +2011        return this
      +2012
      +2013    def _parse_hint(self) -> t.Optional[exp.Hint]:
      +2014        if self._match(TokenType.HINT):
      +2015            hints = self._parse_csv(self._parse_function)
      +2016
      +2017            if not self._match_pair(TokenType.STAR, TokenType.SLASH):
      +2018                self.raise_error("Expected */ after HINT")
       2019
      -2020    def _parse_hint(self) -> t.Optional[exp.Expression]:
      -2021        if self._match(TokenType.HINT):
      -2022            hints = self._parse_csv(self._parse_function)
      -2023            if not self._match_pair(TokenType.STAR, TokenType.SLASH):
      -2024                self.raise_error("Expected */ after HINT")
      -2025            return self.expression(exp.Hint, expressions=hints)
      -2026
      -2027        return None
      -2028
      -2029    def _parse_into(self) -> t.Optional[exp.Expression]:
      -2030        if not self._match(TokenType.INTO):
      -2031            return None
      -2032
      -2033        temp = self._match(TokenType.TEMPORARY)
      -2034        unlogged = self._match_text_seq("UNLOGGED")
      -2035        self._match(TokenType.TABLE)
      -2036
      -2037        return self.expression(
      -2038            exp.Into, this=self._parse_table(schema=True), temporary=temp, unlogged=unlogged
      -2039        )
      -2040
      -2041    def _parse_from(
      -2042        self, modifiers: bool = False, skip_from_token: bool = False
      -2043    ) -> t.Optional[exp.From]:
      -2044        if not skip_from_token and not self._match(TokenType.FROM):
      -2045            return None
      -2046
      -2047        comments = self._prev_comments
      -2048        this = self._parse_table()
      -2049
      -2050        return self.expression(
      -2051            exp.From,
      -2052            comments=comments,
      -2053            this=self._parse_query_modifiers(this) if modifiers else this,
      -2054        )
      -2055
      -2056    def _parse_match_recognize(self) -> t.Optional[exp.Expression]:
      -2057        if not self._match(TokenType.MATCH_RECOGNIZE):
      -2058            return None
      -2059
      -2060        self._match_l_paren()
      -2061
      -2062        partition = self._parse_partition_by()
      -2063        order = self._parse_order()
      -2064        measures = (
      -2065            self._parse_csv(self._parse_expression) if self._match_text_seq("MEASURES") else None
      -2066        )
      -2067
      -2068        if self._match_text_seq("ONE", "ROW", "PER", "MATCH"):
      -2069            rows = exp.Var(this="ONE ROW PER MATCH")
      -2070        elif self._match_text_seq("ALL", "ROWS", "PER", "MATCH"):
      -2071            text = "ALL ROWS PER MATCH"
      -2072            if self._match_text_seq("SHOW", "EMPTY", "MATCHES"):
      -2073                text += f" SHOW EMPTY MATCHES"
      -2074            elif self._match_text_seq("OMIT", "EMPTY", "MATCHES"):
      -2075                text += f" OMIT EMPTY MATCHES"
      -2076            elif self._match_text_seq("WITH", "UNMATCHED", "ROWS"):
      -2077                text += f" WITH UNMATCHED ROWS"
      -2078            rows = exp.Var(this=text)
      -2079        else:
      -2080            rows = None
      -2081
      -2082        if self._match_text_seq("AFTER", "MATCH", "SKIP"):
      -2083            text = "AFTER MATCH SKIP"
      -2084            if self._match_text_seq("PAST", "LAST", "ROW"):
      -2085                text += f" PAST LAST ROW"
      -2086            elif self._match_text_seq("TO", "NEXT", "ROW"):
      -2087                text += f" TO NEXT ROW"
      -2088            elif self._match_text_seq("TO", "FIRST"):
      -2089                text += f" TO FIRST {self._advance_any().text}"  # type: ignore
      -2090            elif self._match_text_seq("TO", "LAST"):
      -2091                text += f" TO LAST {self._advance_any().text}"  # type: ignore
      -2092            after = exp.Var(this=text)
      -2093        else:
      -2094            after = None
      -2095
      -2096        if self._match_text_seq("PATTERN"):
      -2097            self._match_l_paren()
      -2098
      -2099            if not self._curr:
      -2100                self.raise_error("Expecting )", self._curr)
      -2101
      -2102            paren = 1
      -2103            start = self._curr
      -2104
      -2105            while self._curr and paren > 0:
      -2106                if self._curr.token_type == TokenType.L_PAREN:
      -2107                    paren += 1
      -2108                if self._curr.token_type == TokenType.R_PAREN:
      -2109                    paren -= 1
      -2110                end = self._prev
      -2111                self._advance()
      -2112            if paren > 0:
      -2113                self.raise_error("Expecting )", self._curr)
      -2114            pattern = exp.Var(this=self._find_sql(start, end))
      -2115        else:
      -2116            pattern = None
      -2117
      -2118        define = (
      -2119            self._parse_csv(
      -2120                lambda: self.expression(
      -2121                    exp.Alias,
      -2122                    alias=self._parse_id_var(any_token=True),
      -2123                    this=self._match(TokenType.ALIAS) and self._parse_conjunction(),
      -2124                )
      -2125            )
      -2126            if self._match_text_seq("DEFINE")
      -2127            else None
      -2128        )
      +2020            return self.expression(exp.Hint, expressions=hints)
      +2021
      +2022        return None
      +2023
      +2024    def _parse_into(self) -> t.Optional[exp.Into]:
      +2025        if not self._match(TokenType.INTO):
      +2026            return None
      +2027
      +2028        temp = self._match(TokenType.TEMPORARY)
      +2029        unlogged = self._match_text_seq("UNLOGGED")
      +2030        self._match(TokenType.TABLE)
      +2031
      +2032        return self.expression(
      +2033            exp.Into, this=self._parse_table(schema=True), temporary=temp, unlogged=unlogged
      +2034        )
      +2035
      +2036    def _parse_from(
      +2037        self, modifiers: bool = False, skip_from_token: bool = False
      +2038    ) -> t.Optional[exp.From]:
      +2039        if not skip_from_token and not self._match(TokenType.FROM):
      +2040            return None
      +2041
      +2042        comments = self._prev_comments
      +2043        this = self._parse_table()
      +2044
      +2045        return self.expression(
      +2046            exp.From,
      +2047            comments=comments,
      +2048            this=self._parse_query_modifiers(this) if modifiers else this,
      +2049        )
      +2050
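Illustrative sketch, not part of the patch: _parse_into and _parse_from feed the INTO/FROM clauses of a SELECT into the AST. The table names below are placeholders; parse_one, find and args are sqlglot's public interface.

    from sqlglot import exp, parse_one

    select = parse_one("SELECT * INTO TEMPORARY t2 FROM t1")
    print(select.find(exp.Into).args.get("temporary"))  # expected: True
    print(select.find(exp.From).sql())                  # expected: FROM t1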
      +2051    def _parse_match_recognize(self) -> t.Optional[exp.MatchRecognize]:
      +2052        if not self._match(TokenType.MATCH_RECOGNIZE):
      +2053            return None
      +2054
      +2055        self._match_l_paren()
      +2056
      +2057        partition = self._parse_partition_by()
      +2058        order = self._parse_order()
      +2059        measures = (
      +2060            self._parse_csv(self._parse_expression) if self._match_text_seq("MEASURES") else None
      +2061        )
      +2062
      +2063        if self._match_text_seq("ONE", "ROW", "PER", "MATCH"):
      +2064            rows = exp.var("ONE ROW PER MATCH")
      +2065        elif self._match_text_seq("ALL", "ROWS", "PER", "MATCH"):
      +2066            text = "ALL ROWS PER MATCH"
      +2067            if self._match_text_seq("SHOW", "EMPTY", "MATCHES"):
      +2068                text += " SHOW EMPTY MATCHES"
      +2069            elif self._match_text_seq("OMIT", "EMPTY", "MATCHES"):
      +2070                text += " OMIT EMPTY MATCHES"
      +2071            elif self._match_text_seq("WITH", "UNMATCHED", "ROWS"):
      +2072                text += " WITH UNMATCHED ROWS"
      +2073            rows = exp.var(text)
      +2074        else:
      +2075            rows = None
      +2076
      +2077        if self._match_text_seq("AFTER", "MATCH", "SKIP"):
      +2078            text = "AFTER MATCH SKIP"
      +2079            if self._match_text_seq("PAST", "LAST", "ROW"):
      +2080                text += " PAST LAST ROW"
      +2081            elif self._match_text_seq("TO", "NEXT", "ROW"):
      +2082                text += " TO NEXT ROW"
      +2083            elif self._match_text_seq("TO", "FIRST"):
      +2084                text += f" TO FIRST {self._advance_any().text}"  # type: ignore
      +2085            elif self._match_text_seq("TO", "LAST"):
      +2086                text += f" TO LAST {self._advance_any().text}"  # type: ignore
      +2087            after = exp.var(text)
      +2088        else:
      +2089            after = None
      +2090
      +2091        if self._match_text_seq("PATTERN"):
      +2092            self._match_l_paren()
      +2093
      +2094            if not self._curr:
      +2095                self.raise_error("Expecting )", self._curr)
      +2096
      +2097            paren = 1
      +2098            start = self._curr
      +2099
      +2100            while self._curr and paren > 0:
      +2101                if self._curr.token_type == TokenType.L_PAREN:
      +2102                    paren += 1
      +2103                if self._curr.token_type == TokenType.R_PAREN:
      +2104                    paren -= 1
      +2105
      +2106                end = self._prev
      +2107                self._advance()
      +2108
      +2109            if paren > 0:
      +2110                self.raise_error("Expecting )", self._curr)
      +2111
      +2112            pattern = exp.var(self._find_sql(start, end))
      +2113        else:
      +2114            pattern = None
      +2115
      +2116        define = (
      +2117            self._parse_csv(
      +2118                lambda: self.expression(
      +2119                    exp.Alias,
      +2120                    alias=self._parse_id_var(any_token=True),
      +2121                    this=self._match(TokenType.ALIAS) and self._parse_conjunction(),
      +2122                )
      +2123            )
      +2124            if self._match_text_seq("DEFINE")
      +2125            else None
      +2126        )
      +2127
      +2128        self._match_r_paren()
       2129
      -2130        self._match_r_paren()
      -2131
      -2132        return self.expression(
      -2133            exp.MatchRecognize,
      -2134            partition_by=partition,
      -2135            order=order,
      -2136            measures=measures,
      -2137            rows=rows,
      -2138            after=after,
      -2139            pattern=pattern,
      -2140            define=define,
      -2141            alias=self._parse_table_alias(),
      -2142        )
      -2143
      -2144    def _parse_lateral(self) -> t.Optional[exp.Expression]:
      -2145        outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY)
      -2146        cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY)
      -2147
      -2148        if outer_apply or cross_apply:
      -2149            this = self._parse_select(table=True)
      -2150            view = None
      -2151            outer = not cross_apply
      -2152        elif self._match(TokenType.LATERAL):
      -2153            this = self._parse_select(table=True)
      -2154            view = self._match(TokenType.VIEW)
      -2155            outer = self._match(TokenType.OUTER)
      -2156        else:
      -2157            return None
      -2158
      -2159        if not this:
      -2160            this = self._parse_function() or self._parse_id_var(any_token=False)
      -2161            while self._match(TokenType.DOT):
      -2162                this = exp.Dot(
      -2163                    this=this,
      -2164                    expression=self._parse_function() or self._parse_id_var(any_token=False),
      -2165                )
      -2166
      -2167        table_alias: t.Optional[exp.Expression]
      -2168
      -2169        if view:
      -2170            table = self._parse_id_var(any_token=False)
      -2171            columns = self._parse_csv(self._parse_id_var) if self._match(TokenType.ALIAS) else []
      -2172            table_alias = self.expression(exp.TableAlias, this=table, columns=columns)
      -2173        else:
      -2174            table_alias = self._parse_table_alias()
      -2175
      -2176        expression = self.expression(
      -2177            exp.Lateral,
      -2178            this=this,
      -2179            view=view,
      -2180            outer=outer,
      -2181            alias=table_alias,
      -2182        )
      -2183
      -2184        return expression
      -2185
      -2186    def _parse_join_parts(
      -2187        self,
      -2188    ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
      -2189        return (
      -2190            self._match_set(self.JOIN_METHODS) and self._prev,
      -2191            self._match_set(self.JOIN_SIDES) and self._prev,
      -2192            self._match_set(self.JOIN_KINDS) and self._prev,
      -2193        )
      -2194
      -2195    def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Expression]:
      -2196        if self._match(TokenType.COMMA):
      -2197            return self.expression(exp.Join, this=self._parse_table())
      -2198
      -2199        index = self._index
      -2200        method, side, kind = self._parse_join_parts()
      -2201        hint = self._prev.text if self._match_texts(self.JOIN_HINTS) else None
      -2202        join = self._match(TokenType.JOIN)
      -2203
      -2204        if not skip_join_token and not join:
      -2205            self._retreat(index)
      -2206            kind = None
      -2207            method = None
      -2208            side = None
      -2209
      -2210        outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY, False)
      -2211        cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY, False)
      -2212
      -2213        if not skip_join_token and not join and not outer_apply and not cross_apply:
      -2214            return None
      -2215
      -2216        if outer_apply:
      -2217            side = Token(TokenType.LEFT, "LEFT")
      -2218
      -2219        kwargs: t.Dict[str, t.Any] = {"this": self._parse_table()}
      -2220
      -2221        if method:
      -2222            kwargs["method"] = method.text
      -2223        if side:
      -2224            kwargs["side"] = side.text
      -2225        if kind:
      -2226            kwargs["kind"] = kind.text
      -2227        if hint:
      -2228            kwargs["hint"] = hint
      +2130        return self.expression(
      +2131            exp.MatchRecognize,
      +2132            partition_by=partition,
      +2133            order=order,
      +2134            measures=measures,
      +2135            rows=rows,
      +2136            after=after,
      +2137            pattern=pattern,
      +2138            define=define,
      +2139            alias=self._parse_table_alias(),
      +2140        )
      +2141
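For orientation, a hedged example of what _parse_match_recognize now produces (the ROWS/AFTER/PATTERN fragments come back as exp.var values); the query and its identifiers are invented, and the Snowflake dialect is assumed to accept MATCH_RECOGNIZE.

    from sqlglot import exp, parse_one

    sql = """
    SELECT * FROM t MATCH_RECOGNIZE (
      PARTITION BY a ORDER BY b
      MEASURES c AS m
      ONE ROW PER MATCH
      AFTER MATCH SKIP PAST LAST ROW
      PATTERN (x+)
      DEFINE x AS x > 0
    ) AS mr
    """
    mr = parse_one(sql, read="snowflake").find(exp.MatchRecognize)
    print(mr.args.get("rows"))     # expected: Var for "ONE ROW PER MATCH"
    print(mr.args.get("pattern"))  # expected: Var for "x+"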
      +2142    def _parse_lateral(self) -> t.Optional[exp.Lateral]:
      +2143        outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY)
      +2144        cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY)
      +2145
      +2146        if outer_apply or cross_apply:
      +2147            this = self._parse_select(table=True)
      +2148            view = None
      +2149            outer = not cross_apply
      +2150        elif self._match(TokenType.LATERAL):
      +2151            this = self._parse_select(table=True)
      +2152            view = self._match(TokenType.VIEW)
      +2153            outer = self._match(TokenType.OUTER)
      +2154        else:
      +2155            return None
      +2156
      +2157        if not this:
      +2158            this = self._parse_function() or self._parse_id_var(any_token=False)
      +2159            while self._match(TokenType.DOT):
      +2160                this = exp.Dot(
      +2161                    this=this,
      +2162                    expression=self._parse_function() or self._parse_id_var(any_token=False),
      +2163                )
      +2164
      +2165        if view:
      +2166            table = self._parse_id_var(any_token=False)
      +2167            columns = self._parse_csv(self._parse_id_var) if self._match(TokenType.ALIAS) else []
      +2168            table_alias: t.Optional[exp.TableAlias] = self.expression(
      +2169                exp.TableAlias, this=table, columns=columns
      +2170            )
      +2171        elif isinstance(this, exp.Subquery) and this.alias:
      +2172            # Ensures parity between the Subquery's and the Lateral's "alias" args
      +2173            table_alias = this.args["alias"].copy()
      +2174        else:
      +2175            table_alias = self._parse_table_alias()
      +2176
      +2177        return self.expression(exp.Lateral, this=this, view=view, outer=outer, alias=table_alias)
      +2178
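A minimal sketch of the new Subquery/Lateral alias parity, assuming an aliased LATERAL subquery (names are placeholders):

    from sqlglot import exp, parse_one

    lateral = parse_one("SELECT * FROM t, LATERAL (SELECT 1) AS sub").find(exp.Lateral)
    # The Lateral's "alias" arg should now mirror the subquery's alias ("sub").
    print(lateral.args.get("alias"))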
      +2179    def _parse_join_parts(
      +2180        self,
      +2181    ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
      +2182        return (
      +2183            self._match_set(self.JOIN_METHODS) and self._prev,
      +2184            self._match_set(self.JOIN_SIDES) and self._prev,
      +2185            self._match_set(self.JOIN_KINDS) and self._prev,
      +2186        )
      +2187
      +2188    def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Join]:
      +2189        if self._match(TokenType.COMMA):
      +2190            return self.expression(exp.Join, this=self._parse_table())
      +2191
      +2192        index = self._index
      +2193        method, side, kind = self._parse_join_parts()
      +2194        hint = self._prev.text if self._match_texts(self.JOIN_HINTS) else None
      +2195        join = self._match(TokenType.JOIN)
      +2196
      +2197        if not skip_join_token and not join:
      +2198            self._retreat(index)
      +2199            kind = None
      +2200            method = None
      +2201            side = None
      +2202
      +2203        outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY, False)
      +2204        cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY, False)
      +2205
      +2206        if not skip_join_token and not join and not outer_apply and not cross_apply:
      +2207            return None
      +2208
      +2209        if outer_apply:
      +2210            side = Token(TokenType.LEFT, "LEFT")
      +2211
      +2212        kwargs: t.Dict[str, t.Any] = {"this": self._parse_table()}
      +2213
      +2214        if method:
      +2215            kwargs["method"] = method.text
      +2216        if side:
      +2217            kwargs["side"] = side.text
      +2218        if kind:
      +2219            kwargs["kind"] = kind.text
      +2220        if hint:
      +2221            kwargs["hint"] = hint
      +2222
      +2223        if self._match(TokenType.ON):
      +2224            kwargs["on"] = self._parse_conjunction()
      +2225        elif self._match(TokenType.USING):
      +2226            kwargs["using"] = self._parse_wrapped_id_vars()
      +2227
      +2228        return self.expression(exp.Join, **kwargs)
       2229
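Sketch of _parse_join output for the ON and USING branches; tables and columns are invented for illustration.

    from sqlglot import exp, parse_one

    join = parse_one("SELECT * FROM a LEFT JOIN b USING (id)").find(exp.Join)
    print(join.args.get("side"))   # expected: "LEFT"
    print(join.args.get("using"))  # expected: [id]

    join = parse_one("SELECT * FROM a JOIN b ON a.id = b.id").find(exp.Join)
    print(join.args.get("on"))     # expected: the a.id = b.id condition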
      -2230        if self._match(TokenType.ON):
      -2231            kwargs["on"] = self._parse_conjunction()
      -2232        elif self._match(TokenType.USING):
      -2233            kwargs["using"] = self._parse_wrapped_id_vars()
      -2234
      -2235        return self.expression(exp.Join, **kwargs)
      -2236
      -2237    def _parse_index(
      -2238        self,
      -2239        index: t.Optional[exp.Expression] = None,
      -2240    ) -> t.Optional[exp.Expression]:
      -2241        if index:
      -2242            unique = None
      -2243            primary = None
      -2244            amp = None
      -2245
      -2246            self._match(TokenType.ON)
      -2247            self._match(TokenType.TABLE)  # hive
      -2248            table = self._parse_table_parts(schema=True)
      -2249        else:
      -2250            unique = self._match(TokenType.UNIQUE)
      -2251            primary = self._match_text_seq("PRIMARY")
      -2252            amp = self._match_text_seq("AMP")
      -2253            if not self._match(TokenType.INDEX):
      -2254                return None
      -2255            index = self._parse_id_var()
      -2256            table = None
      -2257
      -2258        if self._match(TokenType.L_PAREN, advance=False):
      -2259            columns = self._parse_wrapped_csv(self._parse_ordered)
      -2260        else:
      -2261            columns = None
      -2262
      -2263        return self.expression(
      -2264            exp.Index,
      -2265            this=index,
      -2266            table=table,
      -2267            columns=columns,
      -2268            unique=unique,
      -2269            primary=primary,
      -2270            amp=amp,
      -2271            partition_by=self._parse_partition_by(),
      -2272        )
      -2273
      -2274    def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
      -2275        return (
      -2276            (not schema and self._parse_function())
      -2277            or self._parse_id_var(any_token=False)
      -2278            or self._parse_string_as_identifier()
      -2279            or self._parse_placeholder()
      -2280        )
      -2281
      -2282    def _parse_table_parts(self, schema: bool = False) -> exp.Table:
      -2283        catalog = None
      -2284        db = None
      -2285        table = self._parse_table_part(schema=schema)
      -2286
      -2287        while self._match(TokenType.DOT):
      -2288            if catalog:
      -2289                # This allows nesting the table in arbitrarily many dot expressions if needed
      -2290                table = self.expression(
      -2291                    exp.Dot, this=table, expression=self._parse_table_part(schema=schema)
      -2292                )
      -2293            else:
      -2294                catalog = db
      -2295                db = table
      -2296                table = self._parse_table_part(schema=schema)
      -2297
      -2298        if not table:
      -2299            self.raise_error(f"Expected table name but got {self._curr}")
      -2300
      -2301        return self.expression(
      -2302            exp.Table, this=table, db=db, catalog=catalog, pivots=self._parse_pivots()
      -2303        )
      -2304
      -2305    def _parse_table(
      -2306        self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
      -2307    ) -> t.Optional[exp.Expression]:
      -2308        lateral = self._parse_lateral()
      -2309        if lateral:
      -2310            return lateral
      -2311
      -2312        unnest = self._parse_unnest()
      -2313        if unnest:
      -2314            return unnest
      -2315
      -2316        values = self._parse_derived_table_values()
      -2317        if values:
      -2318            return values
      -2319
      -2320        subquery = self._parse_select(table=True)
      -2321        if subquery:
      -2322            if not subquery.args.get("pivots"):
      -2323                subquery.set("pivots", self._parse_pivots())
      -2324            return subquery
      +2230    def _parse_index(
      +2231        self,
      +2232        index: t.Optional[exp.Expression] = None,
      +2233    ) -> t.Optional[exp.Index]:
      +2234        if index:
      +2235            unique = None
      +2236            primary = None
      +2237            amp = None
      +2238
      +2239            self._match(TokenType.ON)
      +2240            self._match(TokenType.TABLE)  # hive
      +2241            table = self._parse_table_parts(schema=True)
      +2242        else:
      +2243            unique = self._match(TokenType.UNIQUE)
      +2244            primary = self._match_text_seq("PRIMARY")
      +2245            amp = self._match_text_seq("AMP")
      +2246
      +2247            if not self._match(TokenType.INDEX):
      +2248                return None
      +2249
      +2250            index = self._parse_id_var()
      +2251            table = None
      +2252
      +2253        using = self._parse_field() if self._match(TokenType.USING) else None
      +2254
      +2255        if self._match(TokenType.L_PAREN, advance=False):
      +2256            columns = self._parse_wrapped_csv(self._parse_ordered)
      +2257        else:
      +2258            columns = None
      +2259
      +2260        return self.expression(
      +2261            exp.Index,
      +2262            this=index,
      +2263            table=table,
      +2264            using=using,
      +2265            columns=columns,
      +2266            unique=unique,
      +2267            primary=primary,
      +2268            amp=amp,
      +2269            partition_by=self._parse_partition_by(),
      +2270        )
      +2271
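The new "using" slot on exp.Index is aimed at Postgres-style access methods. A hedged example, assuming CREATE INDEX ... USING reaches this code path; the index, table and column names are placeholders.

    from sqlglot import exp, parse_one

    idx = parse_one("CREATE INDEX my_idx ON tbl USING btree (col1)", read="postgres").find(exp.Index)
    print(idx.args.get("using"))    # expected: something like btree
    print(idx.args.get("columns"))  # expected: [col1]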
      +2272    def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
      +2273        return (
      +2274            (not schema and self._parse_function(optional_parens=False))
      +2275            or self._parse_id_var(any_token=False)
      +2276            or self._parse_string_as_identifier()
      +2277            or self._parse_placeholder()
      +2278        )
      +2279
      +2280    def _parse_table_parts(self, schema: bool = False) -> exp.Table:
      +2281        catalog = None
      +2282        db = None
      +2283        table = self._parse_table_part(schema=schema)
      +2284
      +2285        while self._match(TokenType.DOT):
      +2286            if catalog:
      +2287                # This allows nesting the table in arbitrarily many dot expressions if needed
      +2288                table = self.expression(
      +2289                    exp.Dot, this=table, expression=self._parse_table_part(schema=schema)
      +2290                )
      +2291            else:
      +2292                catalog = db
      +2293                db = table
      +2294                table = self._parse_table_part(schema=schema)
      +2295
      +2296        if not table:
      +2297            self.raise_error(f"Expected table name but got {self._curr}")
      +2298
      +2299        return self.expression(
      +2300            exp.Table, this=table, db=db, catalog=catalog, pivots=self._parse_pivots()
      +2301        )
      +2302
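Quick illustration of _parse_table_parts resolving catalog.db.table (all names fabricated):

    from sqlglot import exp, parse_one

    tbl = parse_one("SELECT * FROM cat1.db1.tbl1").find(exp.Table)
    print(tbl.catalog, tbl.db, tbl.name)  # expected: cat1 db1 tbl1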
      +2303    def _parse_table(
      +2304        self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
      +2305    ) -> t.Optional[exp.Expression]:
      +2306        lateral = self._parse_lateral()
      +2307        if lateral:
      +2308            return lateral
      +2309
      +2310        unnest = self._parse_unnest()
      +2311        if unnest:
      +2312            return unnest
      +2313
      +2314        values = self._parse_derived_table_values()
      +2315        if values:
      +2316            return values
      +2317
      +2318        subquery = self._parse_select(table=True)
      +2319        if subquery:
      +2320            if not subquery.args.get("pivots"):
      +2321                subquery.set("pivots", self._parse_pivots())
      +2322            return subquery
      +2323
      +2324        this: exp.Expression = self._parse_table_parts(schema=schema)
       2325
      -2326        this: exp.Expression = self._parse_table_parts(schema=schema)
      -2327
      -2328        if schema:
      -2329            return self._parse_schema(this=this)
      -2330
      -2331        if self.alias_post_tablesample:
      -2332            table_sample = self._parse_table_sample()
      -2333
      -2334        alias = self._parse_table_alias(alias_tokens=alias_tokens or self.TABLE_ALIAS_TOKENS)
      -2335        if alias:
      -2336            this.set("alias", alias)
      -2337
      -2338        if not this.args.get("pivots"):
      -2339            this.set("pivots", self._parse_pivots())
      -2340
      -2341        if self._match_pair(TokenType.WITH, TokenType.L_PAREN):
      -2342            this.set(
      -2343                "hints",
      -2344                self._parse_csv(lambda: self._parse_function() or self._parse_var(any_token=True)),
      -2345            )
      -2346            self._match_r_paren()
      -2347
      -2348        if not self.alias_post_tablesample:
      -2349            table_sample = self._parse_table_sample()
      -2350
      -2351        if table_sample:
      -2352            table_sample.set("this", this)
      -2353            this = table_sample
      +2326        if schema:
      +2327            return self._parse_schema(this=this)
      +2328
      +2329        if self.ALIAS_POST_TABLESAMPLE:
      +2330            table_sample = self._parse_table_sample()
      +2331
      +2332        alias = self._parse_table_alias(alias_tokens=alias_tokens or self.TABLE_ALIAS_TOKENS)
      +2333        if alias:
      +2334            this.set("alias", alias)
      +2335
      +2336        if not this.args.get("pivots"):
      +2337            this.set("pivots", self._parse_pivots())
      +2338
      +2339        if self._match_pair(TokenType.WITH, TokenType.L_PAREN):
      +2340            this.set(
      +2341                "hints",
      +2342                self._parse_csv(lambda: self._parse_function() or self._parse_var(any_token=True)),
      +2343            )
      +2344            self._match_r_paren()
      +2345
      +2346        if not self.ALIAS_POST_TABLESAMPLE:
      +2347            table_sample = self._parse_table_sample()
      +2348
      +2349        if table_sample:
      +2350            table_sample.set("this", this)
      +2351            this = table_sample
      +2352
      +2353        return this
       2354
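Hedged sketch of the ALIAS_POST_TABLESAMPLE switch (now an upper-case class attribute): dialects such as Hive read the alias after the sample clause. The table, column and alias below are placeholders.

    from sqlglot import exp, parse_one

    t = parse_one("SELECT * FROM t TABLESAMPLE (BUCKET 1 OUT OF 4 ON x) s", read="hive")
    print(t.find(exp.TableSample).args.get("bucket_numerator"))  # expected: 1
    print(t.find(exp.Table).alias)                               # expected: "s"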
      -2355        return this
      -2356
      -2357    def _parse_unnest(self) -> t.Optional[exp.Expression]:
      -2358        if not self._match(TokenType.UNNEST):
      -2359            return None
      -2360
      -2361        expressions = self._parse_wrapped_csv(self._parse_type)
      -2362        ordinality = self._match_pair(TokenType.WITH, TokenType.ORDINALITY)
      -2363        alias = self._parse_table_alias()
      -2364
      -2365        if alias and self.unnest_column_only:
      -2366            if alias.args.get("columns"):
      -2367                self.raise_error("Unexpected extra column alias in unnest.")
      +2355    def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
      +2356        if not self._match(TokenType.UNNEST):
      +2357            return None
      +2358
      +2359        expressions = self._parse_wrapped_csv(self._parse_type)
      +2360        ordinality = self._match_pair(TokenType.WITH, TokenType.ORDINALITY)
      +2361
      +2362        alias = self._parse_table_alias() if with_alias else None
      +2363
      +2364        if alias and self.UNNEST_COLUMN_ONLY:
      +2365            if alias.args.get("columns"):
      +2366                self.raise_error("Unexpected extra column alias in unnest.")
      +2367
       2368            alias.set("columns", [alias.this])
       2369            alias.set("this", None)
       2370
       2371        offset = None
       2372        if self._match_pair(TokenType.WITH, TokenType.OFFSET):
       2373            self._match(TokenType.ALIAS)
      -2374            offset = self._parse_id_var() or exp.Identifier(this="offset")
      +2374            offset = self._parse_id_var() or exp.to_identifier("offset")
       2375
       2376        return self.expression(
      -2377            exp.Unnest,
      -2378            expressions=expressions,
      -2379            ordinality=ordinality,
      -2380            alias=alias,
      -2381            offset=offset,
      -2382        )
      -2383
      -2384    def _parse_derived_table_values(self) -> t.Optional[exp.Expression]:
      -2385        is_derived = self._match_pair(TokenType.L_PAREN, TokenType.VALUES)
      -2386        if not is_derived and not self._match(TokenType.VALUES):
      -2387            return None
      -2388
      -2389        expressions = self._parse_csv(self._parse_value)
      +2377            exp.Unnest, expressions=expressions, ordinality=ordinality, alias=alias, offset=offset
      +2378        )
      +2379
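Sketch of the UNNEST changes: when WITH OFFSET has no explicit alias the offset now defaults to exp.to_identifier("offset"), and BigQuery's UNNEST_COLUMN_ONLY turns the table alias into a column alias. The query below is illustrative only.

    from sqlglot import exp, parse_one

    u = parse_one("SELECT * FROM UNNEST([1, 2]) AS x WITH OFFSET AS pos", read="bigquery").find(exp.Unnest)
    print(u.args.get("offset"))                 # expected: identifier "pos"
    print(u.args.get("alias").args["columns"])  # expected: [x]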
      +2380    def _parse_derived_table_values(self) -> t.Optional[exp.Values]:
      +2381        is_derived = self._match_pair(TokenType.L_PAREN, TokenType.VALUES)
      +2382        if not is_derived and not self._match(TokenType.VALUES):
      +2383            return None
      +2384
      +2385        expressions = self._parse_csv(self._parse_value)
      +2386        alias = self._parse_table_alias()
      +2387
      +2388        if is_derived:
      +2389            self._match_r_paren()
       2390
      -2391        if is_derived:
      -2392            self._match_r_paren()
      -2393
      -2394        return self.expression(exp.Values, expressions=expressions, alias=self._parse_table_alias())
      -2395
      -2396    def _parse_table_sample(self, as_modifier: bool = False) -> t.Optional[exp.Expression]:
      -2397        if not self._match(TokenType.TABLE_SAMPLE) and not (
      -2398            as_modifier and self._match_text_seq("USING", "SAMPLE")
      -2399        ):
      -2400            return None
      -2401
      -2402        bucket_numerator = None
      -2403        bucket_denominator = None
      -2404        bucket_field = None
      -2405        percent = None
      -2406        rows = None
      -2407        size = None
      -2408        seed = None
      -2409
      -2410        kind = (
      -2411            self._prev.text if self._prev.token_type == TokenType.TABLE_SAMPLE else "USING SAMPLE"
      -2412        )
      -2413        method = self._parse_var(tokens=(TokenType.ROW,))
      -2414
      -2415        self._match(TokenType.L_PAREN)
      -2416
      -2417        num = self._parse_number()
      -2418
      -2419        if self._match_text_seq("BUCKET"):
      -2420            bucket_numerator = self._parse_number()
      -2421            self._match_text_seq("OUT", "OF")
      -2422            bucket_denominator = bucket_denominator = self._parse_number()
      -2423            self._match(TokenType.ON)
      -2424            bucket_field = self._parse_field()
      -2425        elif self._match_set((TokenType.PERCENT, TokenType.MOD)):
      -2426            percent = num
      -2427        elif self._match(TokenType.ROWS):
      -2428            rows = num
      -2429        else:
      -2430            size = num
      -2431
      -2432        self._match(TokenType.R_PAREN)
      -2433
      -2434        if self._match(TokenType.L_PAREN):
      -2435            method = self._parse_var()
      -2436            seed = self._match(TokenType.COMMA) and self._parse_number()
      -2437            self._match_r_paren()
      -2438        elif self._match_texts(("SEED", "REPEATABLE")):
      -2439            seed = self._parse_wrapped(self._parse_number)
      -2440
      -2441        return self.expression(
      -2442            exp.TableSample,
      -2443            method=method,
      -2444            bucket_numerator=bucket_numerator,
      -2445            bucket_denominator=bucket_denominator,
      -2446            bucket_field=bucket_field,
      -2447            percent=percent,
      -2448            rows=rows,
      -2449            size=size,
      -2450            seed=seed,
      -2451            kind=kind,
      -2452        )
      -2453
      -2454    def _parse_pivots(self) -> t.List[t.Optional[exp.Expression]]:
      -2455        return list(iter(self._parse_pivot, None))
      -2456
      -2457    # https://duckdb.org/docs/sql/statements/pivot
      -2458    def _parse_simplified_pivot(self) -> exp.Pivot:
      -2459        def _parse_on() -> t.Optional[exp.Expression]:
      -2460            this = self._parse_bitwise()
      -2461            return self._parse_in(this) if self._match(TokenType.IN) else this
      -2462
      -2463        this = self._parse_table()
      -2464        expressions = self._match(TokenType.ON) and self._parse_csv(_parse_on)
      -2465        using = self._match(TokenType.USING) and self._parse_csv(
      -2466            lambda: self._parse_alias(self._parse_function())
      -2467        )
      -2468        group = self._parse_group()
      -2469        return self.expression(
      -2470            exp.Pivot, this=this, expressions=expressions, using=using, group=group
      -2471        )
      -2472
      -2473    def _parse_pivot(self) -> t.Optional[exp.Expression]:
      -2474        index = self._index
      -2475
      -2476        if self._match(TokenType.PIVOT):
      -2477            unpivot = False
      -2478        elif self._match(TokenType.UNPIVOT):
      -2479            unpivot = True
      -2480        else:
      -2481            return None
      -2482
      -2483        expressions = []
      -2484        field = None
      -2485
      -2486        if not self._match(TokenType.L_PAREN):
      -2487            self._retreat(index)
      -2488            return None
      -2489
      -2490        if unpivot:
      -2491            expressions = self._parse_csv(self._parse_column)
      -2492        else:
      -2493            expressions = self._parse_csv(lambda: self._parse_alias(self._parse_function()))
      -2494
      -2495        if not expressions:
      -2496            self.raise_error("Failed to parse PIVOT's aggregation list")
      -2497
      -2498        if not self._match(TokenType.FOR):
      -2499            self.raise_error("Expecting FOR")
      -2500
      -2501        value = self._parse_column()
      -2502
      -2503        if not self._match(TokenType.IN):
      -2504            self.raise_error("Expecting IN")
      -2505
      -2506        field = self._parse_in(value, alias=True)
      -2507
      -2508        self._match_r_paren()
      -2509
      -2510        pivot = self.expression(exp.Pivot, expressions=expressions, field=field, unpivot=unpivot)
      -2511
      -2512        if not self._match_set((TokenType.PIVOT, TokenType.UNPIVOT), advance=False):
      -2513            pivot.set("alias", self._parse_table_alias())
      -2514
      -2515        if not unpivot:
      -2516            names = self._pivot_column_names(t.cast(t.List[exp.Expression], expressions))
      -2517
      -2518            columns: t.List[exp.Expression] = []
      -2519            for fld in pivot.args["field"].expressions:
      -2520                field_name = fld.sql() if self.IDENTIFY_PIVOT_STRINGS else fld.alias_or_name
      -2521                for name in names:
      -2522                    if self.PREFIXED_PIVOT_COLUMNS:
      -2523                        name = f"{name}_{field_name}" if name else field_name
      -2524                    else:
      -2525                        name = f"{field_name}_{name}" if name else field_name
      -2526
      -2527                    columns.append(exp.to_identifier(name))
      -2528
      -2529            pivot.set("columns", columns)
      -2530
      -2531        return pivot
      -2532
      -2533    def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
      -2534        return [agg.alias for agg in aggregations]
      -2535
      -2536    def _parse_where(self, skip_where_token: bool = False) -> t.Optional[exp.Expression]:
      -2537        if not skip_where_token and not self._match(TokenType.WHERE):
      -2538            return None
      -2539
      -2540        return self.expression(
      -2541            exp.Where, comments=self._prev_comments, this=self._parse_conjunction()
      -2542        )
      -2543
      -2544    def _parse_group(self, skip_group_by_token: bool = False) -> t.Optional[exp.Expression]:
      -2545        if not skip_group_by_token and not self._match(TokenType.GROUP_BY):
      -2546            return None
      -2547
      -2548        elements = defaultdict(list)
      -2549
      -2550        while True:
      -2551            expressions = self._parse_csv(self._parse_conjunction)
      -2552            if expressions:
      -2553                elements["expressions"].extend(expressions)
      -2554
      -2555            grouping_sets = self._parse_grouping_sets()
      -2556            if grouping_sets:
      -2557                elements["grouping_sets"].extend(grouping_sets)
      -2558
      -2559            rollup = None
      -2560            cube = None
      -2561            totals = None
      -2562
      -2563            with_ = self._match(TokenType.WITH)
      -2564            if self._match(TokenType.ROLLUP):
      -2565                rollup = with_ or self._parse_wrapped_csv(self._parse_column)
      -2566                elements["rollup"].extend(ensure_list(rollup))
      -2567
      -2568            if self._match(TokenType.CUBE):
      -2569                cube = with_ or self._parse_wrapped_csv(self._parse_column)
      -2570                elements["cube"].extend(ensure_list(cube))
      -2571
      -2572            if self._match_text_seq("TOTALS"):
      -2573                totals = True
      -2574                elements["totals"] = True  # type: ignore
      -2575
      -2576            if not (grouping_sets or rollup or cube or totals):
      -2577                break
      -2578
      -2579        return self.expression(exp.Group, **elements)  # type: ignore
      -2580
      -2581    def _parse_grouping_sets(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
      -2582        if not self._match(TokenType.GROUPING_SETS):
      -2583            return None
      -2584
      -2585        return self._parse_wrapped_csv(self._parse_grouping_set)
      -2586
      -2587    def _parse_grouping_set(self) -> t.Optional[exp.Expression]:
      -2588        if self._match(TokenType.L_PAREN):
      -2589            grouping_set = self._parse_csv(self._parse_column)
      -2590            self._match_r_paren()
      -2591            return self.expression(exp.Tuple, expressions=grouping_set)
      -2592
      -2593        return self._parse_column()
      -2594
      -2595    def _parse_having(self, skip_having_token: bool = False) -> t.Optional[exp.Expression]:
      -2596        if not skip_having_token and not self._match(TokenType.HAVING):
      -2597            return None
      -2598        return self.expression(exp.Having, this=self._parse_conjunction())
      -2599
      -2600    def _parse_qualify(self) -> t.Optional[exp.Expression]:
      -2601        if not self._match(TokenType.QUALIFY):
      -2602            return None
      -2603        return self.expression(exp.Qualify, this=self._parse_conjunction())
      -2604
      -2605    def _parse_order(
      -2606        self, this: t.Optional[exp.Expression] = None, skip_order_token: bool = False
      -2607    ) -> t.Optional[exp.Expression]:
      -2608        if not skip_order_token and not self._match(TokenType.ORDER_BY):
      -2609            return this
      -2610
      -2611        return self.expression(
      -2612            exp.Order, this=this, expressions=self._parse_csv(self._parse_ordered)
      -2613        )
      -2614
      -2615    def _parse_sort(
      -2616        self, exp_class: t.Type[exp.Expression], *texts: str
      -2617    ) -> t.Optional[exp.Expression]:
      -2618        if not self._match_text_seq(*texts):
      -2619            return None
      -2620        return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered))
      -2621
      -2622    def _parse_ordered(self) -> exp.Expression:
      -2623        this = self._parse_conjunction()
      -2624        self._match(TokenType.ASC)
      -2625        is_desc = self._match(TokenType.DESC)
      -2626        is_nulls_first = self._match_text_seq("NULLS", "FIRST")
      -2627        is_nulls_last = self._match_text_seq("NULLS", "LAST")
      -2628        desc = is_desc or False
      -2629        asc = not desc
      -2630        nulls_first = is_nulls_first or False
      -2631        explicitly_null_ordered = is_nulls_first or is_nulls_last
      -2632        if (
      -2633            not explicitly_null_ordered
      -2634            and (
      -2635                (asc and self.null_ordering == "nulls_are_small")
      -2636                or (desc and self.null_ordering != "nulls_are_small")
      -2637            )
      -2638            and self.null_ordering != "nulls_are_last"
      -2639        ):
      -2640            nulls_first = True
      -2641
      -2642        return self.expression(exp.Ordered, this=this, desc=desc, nulls_first=nulls_first)
      -2643
      -2644    def _parse_limit(
      -2645        self, this: t.Optional[exp.Expression] = None, top: bool = False
      -2646    ) -> t.Optional[exp.Expression]:
      -2647        if self._match(TokenType.TOP if top else TokenType.LIMIT):
      -2648            limit_paren = self._match(TokenType.L_PAREN)
      -2649            limit_exp = self.expression(
      -2650                exp.Limit, this=this, expression=self._parse_number() if top else self._parse_term()
      -2651            )
      -2652
      -2653            if limit_paren:
      -2654                self._match_r_paren()
      +2391        return self.expression(
      +2392            exp.Values, expressions=expressions, alias=alias or self._parse_table_alias()
      +2393        )
      +2394
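For reference, a small example of _parse_derived_table_values (the alias may now be read either before or after the closing paren); names are placeholders.

    from sqlglot import exp, parse_one

    v = parse_one("SELECT * FROM (VALUES (1), (2)) AS t(a)").find(exp.Values)
    print(v.alias)             # expected: "t"
    print(len(v.expressions))  # expected: 2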
      +2395    def _parse_table_sample(self, as_modifier: bool = False) -> t.Optional[exp.TableSample]:
      +2396        if not self._match(TokenType.TABLE_SAMPLE) and not (
      +2397            as_modifier and self._match_text_seq("USING", "SAMPLE")
      +2398        ):
      +2399            return None
      +2400
      +2401        bucket_numerator = None
      +2402        bucket_denominator = None
      +2403        bucket_field = None
      +2404        percent = None
      +2405        rows = None
      +2406        size = None
      +2407        seed = None
      +2408
      +2409        kind = (
      +2410            self._prev.text if self._prev.token_type == TokenType.TABLE_SAMPLE else "USING SAMPLE"
      +2411        )
      +2412        method = self._parse_var(tokens=(TokenType.ROW,))
      +2413
      +2414        self._match(TokenType.L_PAREN)
      +2415
      +2416        num = self._parse_number()
      +2417
      +2418        if self._match_text_seq("BUCKET"):
      +2419            bucket_numerator = self._parse_number()
      +2420            self._match_text_seq("OUT", "OF")
      +2421            bucket_denominator = self._parse_number()
      +2422            self._match(TokenType.ON)
      +2423            bucket_field = self._parse_field()
      +2424        elif self._match_set((TokenType.PERCENT, TokenType.MOD)):
      +2425            percent = num
      +2426        elif self._match(TokenType.ROWS):
      +2427            rows = num
      +2428        else:
      +2429            size = num
      +2430
      +2431        self._match(TokenType.R_PAREN)
      +2432
      +2433        if self._match(TokenType.L_PAREN):
      +2434            method = self._parse_var()
      +2435            seed = self._match(TokenType.COMMA) and self._parse_number()
      +2436            self._match_r_paren()
      +2437        elif self._match_texts(("SEED", "REPEATABLE")):
      +2438            seed = self._parse_wrapped(self._parse_number)
      +2439
      +2440        return self.expression(
      +2441            exp.TableSample,
      +2442            method=method,
      +2443            bucket_numerator=bucket_numerator,
      +2444            bucket_denominator=bucket_denominator,
      +2445            bucket_field=bucket_field,
      +2446            percent=percent,
      +2447            rows=rows,
      +2448            size=size,
      +2449            seed=seed,
      +2450            kind=kind,
      +2451        )
      +2452
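A short, hedged example of the table-sample branches (PERCENT vs. ROWS vs. plain size); the table name is invented.

    from sqlglot import exp, parse_one

    ts = parse_one("SELECT * FROM t TABLESAMPLE (10 PERCENT)").find(exp.TableSample)
    print(ts.args.get("percent"))  # expected: 10
    print(ts.args.get("kind"))     # expected: "TABLESAMPLE"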
      +2453    def _parse_pivots(self) -> t.List[t.Optional[exp.Expression]]:
      +2454        return list(iter(self._parse_pivot, None))
      +2455
      +2456    # https://duckdb.org/docs/sql/statements/pivot
      +2457    def _parse_simplified_pivot(self) -> exp.Pivot:
      +2458        def _parse_on() -> t.Optional[exp.Expression]:
      +2459            this = self._parse_bitwise()
      +2460            return self._parse_in(this) if self._match(TokenType.IN) else this
      +2461
      +2462        this = self._parse_table()
      +2463        expressions = self._match(TokenType.ON) and self._parse_csv(_parse_on)
      +2464        using = self._match(TokenType.USING) and self._parse_csv(
      +2465            lambda: self._parse_alias(self._parse_function())
      +2466        )
      +2467        group = self._parse_group()
      +2468        return self.expression(
      +2469            exp.Pivot, this=this, expressions=expressions, using=using, group=group
      +2470        )
      +2471
      +2472    def _parse_pivot(self) -> t.Optional[exp.Pivot]:
      +2473        index = self._index
      +2474
      +2475        if self._match(TokenType.PIVOT):
      +2476            unpivot = False
      +2477        elif self._match(TokenType.UNPIVOT):
      +2478            unpivot = True
      +2479        else:
      +2480            return None
      +2481
      +2482        expressions = []
      +2483        field = None
      +2484
      +2485        if not self._match(TokenType.L_PAREN):
      +2486            self._retreat(index)
      +2487            return None
      +2488
      +2489        if unpivot:
      +2490            expressions = self._parse_csv(self._parse_column)
      +2491        else:
      +2492            expressions = self._parse_csv(lambda: self._parse_alias(self._parse_function()))
      +2493
      +2494        if not expressions:
      +2495            self.raise_error("Failed to parse PIVOT's aggregation list")
      +2496
      +2497        if not self._match(TokenType.FOR):
      +2498            self.raise_error("Expecting FOR")
      +2499
      +2500        value = self._parse_column()
      +2501
      +2502        if not self._match(TokenType.IN):
      +2503            self.raise_error("Expecting IN")
      +2504
      +2505        field = self._parse_in(value, alias=True)
      +2506
      +2507        self._match_r_paren()
      +2508
      +2509        pivot = self.expression(exp.Pivot, expressions=expressions, field=field, unpivot=unpivot)
      +2510
      +2511        if not self._match_set((TokenType.PIVOT, TokenType.UNPIVOT), advance=False):
      +2512            pivot.set("alias", self._parse_table_alias())
      +2513
      +2514        if not unpivot:
      +2515            names = self._pivot_column_names(t.cast(t.List[exp.Expression], expressions))
      +2516
      +2517            columns: t.List[exp.Expression] = []
      +2518            for fld in pivot.args["field"].expressions:
      +2519                field_name = fld.sql() if self.IDENTIFY_PIVOT_STRINGS else fld.alias_or_name
      +2520                for name in names:
      +2521                    if self.PREFIXED_PIVOT_COLUMNS:
      +2522                        name = f"{name}_{field_name}" if name else field_name
      +2523                    else:
      +2524                        name = f"{field_name}_{name}" if name else field_name
      +2525
      +2526                    columns.append(exp.to_identifier(name))
      +2527
      +2528            pivot.set("columns", columns)
      +2529
      +2530        return pivot
      +2531
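Hedged sketch of _parse_pivot on a Snowflake-style PIVOT; the aggregate, key and value list are placeholders, and the generated column names depend on PREFIXED_PIVOT_COLUMNS and IDENTIFY_PIVOT_STRINGS.

    from sqlglot import exp, parse_one

    sql = "SELECT * FROM tbl PIVOT(SUM(v) FOR k IN ('a', 'b')) AS p"
    pivot = parse_one(sql, read="snowflake").find(exp.Pivot)
    print(pivot.args.get("unpivot"))  # expected: False
    print(pivot.args.get("columns"))  # expected: identifiers derived from the IN list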
      +2532    def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
      +2533        return [agg.alias for agg in aggregations]
      +2534
      +2535    def _parse_where(self, skip_where_token: bool = False) -> t.Optional[exp.Where]:
      +2536        if not skip_where_token and not self._match(TokenType.WHERE):
      +2537            return None
      +2538
      +2539        return self.expression(
      +2540            exp.Where, comments=self._prev_comments, this=self._parse_conjunction()
      +2541        )
      +2542
      +2543    def _parse_group(self, skip_group_by_token: bool = False) -> t.Optional[exp.Group]:
      +2544        if not skip_group_by_token and not self._match(TokenType.GROUP_BY):
      +2545            return None
      +2546
      +2547        elements = defaultdict(list)
      +2548
      +2549        while True:
      +2550            expressions = self._parse_csv(self._parse_conjunction)
      +2551            if expressions:
      +2552                elements["expressions"].extend(expressions)
      +2553
      +2554            grouping_sets = self._parse_grouping_sets()
      +2555            if grouping_sets:
      +2556                elements["grouping_sets"].extend(grouping_sets)
      +2557
      +2558            rollup = None
      +2559            cube = None
      +2560            totals = None
      +2561
      +2562            with_ = self._match(TokenType.WITH)
      +2563            if self._match(TokenType.ROLLUP):
      +2564                rollup = with_ or self._parse_wrapped_csv(self._parse_column)
      +2565                elements["rollup"].extend(ensure_list(rollup))
      +2566
      +2567            if self._match(TokenType.CUBE):
      +2568                cube = with_ or self._parse_wrapped_csv(self._parse_column)
      +2569                elements["cube"].extend(ensure_list(cube))
      +2570
      +2571            if self._match_text_seq("TOTALS"):
      +2572                totals = True
      +2573                elements["totals"] = True  # type: ignore
      +2574
      +2575            if not (grouping_sets or rollup or cube or totals):
      +2576                break
      +2577
      +2578        return self.expression(exp.Group, **elements)  # type: ignore
      +2579
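Small example of how _parse_group accumulates GROUPING SETS / ROLLUP / CUBE elements (columns are placeholders):

    from sqlglot import exp, parse_one

    g = parse_one("SELECT a, b FROM t GROUP BY GROUPING SETS ((a), (b))").find(exp.Group)
    print(g.args.get("grouping_sets"))  # expected: two tuples, (a) and (b)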
      +2580    def _parse_grouping_sets(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
      +2581        if not self._match(TokenType.GROUPING_SETS):
      +2582            return None
      +2583
      +2584        return self._parse_wrapped_csv(self._parse_grouping_set)
      +2585
      +2586    def _parse_grouping_set(self) -> t.Optional[exp.Expression]:
      +2587        if self._match(TokenType.L_PAREN):
      +2588            grouping_set = self._parse_csv(self._parse_column)
      +2589            self._match_r_paren()
      +2590            return self.expression(exp.Tuple, expressions=grouping_set)
      +2591
      +2592        return self._parse_column()
      +2593
      +2594    def _parse_having(self, skip_having_token: bool = False) -> t.Optional[exp.Having]:
      +2595        if not skip_having_token and not self._match(TokenType.HAVING):
      +2596            return None
      +2597        return self.expression(exp.Having, this=self._parse_conjunction())
      +2598
      +2599    def _parse_qualify(self) -> t.Optional[exp.Qualify]:
      +2600        if not self._match(TokenType.QUALIFY):
      +2601            return None
      +2602        return self.expression(exp.Qualify, this=self._parse_conjunction())
      +2603
      +2604    def _parse_order(
      +2605        self, this: t.Optional[exp.Expression] = None, skip_order_token: bool = False
      +2606    ) -> t.Optional[exp.Expression]:
      +2607        if not skip_order_token and not self._match(TokenType.ORDER_BY):
      +2608            return this
      +2609
      +2610        return self.expression(
      +2611            exp.Order, this=this, expressions=self._parse_csv(self._parse_ordered)
      +2612        )
      +2613
      +2614    def _parse_sort(self, exp_class: t.Type[E], *texts: str) -> t.Optional[E]:
      +2615        if not self._match_text_seq(*texts):
      +2616            return None
      +2617        return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered))
      +2618
      +2619    def _parse_ordered(self) -> exp.Ordered:
      +2620        this = self._parse_conjunction()
      +2621        self._match(TokenType.ASC)
      +2622
      +2623        is_desc = self._match(TokenType.DESC)
      +2624        is_nulls_first = self._match_text_seq("NULLS", "FIRST")
      +2625        is_nulls_last = self._match_text_seq("NULLS", "LAST")
      +2626        desc = is_desc or False
      +2627        asc = not desc
      +2628        nulls_first = is_nulls_first or False
      +2629        explicitly_null_ordered = is_nulls_first or is_nulls_last
      +2630
      +2631        if (
      +2632            not explicitly_null_ordered
      +2633            and (
      +2634                (asc and self.NULL_ORDERING == "nulls_are_small")
      +2635                or (desc and self.NULL_ORDERING != "nulls_are_small")
      +2636            )
      +2637            and self.NULL_ORDERING != "nulls_are_last"
      +2638        ):
      +2639            nulls_first = True
      +2640
      +2641        return self.expression(exp.Ordered, this=this, desc=desc, nulls_first=nulls_first)
      +2642
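Sketch of the NULL_ORDERING behaviour (now an upper-case class attribute): with the default "nulls_are_small", an ascending key that does not spell out NULLS FIRST/LAST is normalized to nulls_first. Placeholder query:

    from sqlglot import exp, parse_one

    o = parse_one("SELECT * FROM t ORDER BY x").find(exp.Ordered)
    print(o.args.get("nulls_first"))  # expected: True under the default NULL_ORDERING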
      +2643    def _parse_limit(
      +2644        self, this: t.Optional[exp.Expression] = None, top: bool = False
      +2645    ) -> t.Optional[exp.Expression]:
      +2646        if self._match(TokenType.TOP if top else TokenType.LIMIT):
      +2647            limit_paren = self._match(TokenType.L_PAREN)
      +2648            expression = self._parse_number() if top else self._parse_term()
      +2649
      +2650            if self._match(TokenType.COMMA):
      +2651                offset = expression
      +2652                expression = self._parse_term()
      +2653            else:
      +2654                offset = None
       2655
      -2656            return limit_exp
      +2656            limit_exp = self.expression(exp.Limit, this=this, expression=expression, offset=offset)
       2657
      -2658        if self._match(TokenType.FETCH):
      -2659            direction = self._match_set((TokenType.FIRST, TokenType.NEXT))
      -2660            direction = self._prev.text if direction else "FIRST"
      -2661
      -2662            count = self._parse_number()
      -2663            percent = self._match(TokenType.PERCENT)
      -2664
      -2665            self._match_set((TokenType.ROW, TokenType.ROWS))
      +2658            if limit_paren:
      +2659                self._match_r_paren()
      +2660
      +2661            return limit_exp
      +2662
      +2663        if self._match(TokenType.FETCH):
      +2664            direction = self._match_set((TokenType.FIRST, TokenType.NEXT))
      +2665            direction = self._prev.text if direction else "FIRST"
       2666
      -2667            only = self._match_text_seq("ONLY")
      -2668            with_ties = self._match_text_seq("WITH", "TIES")
      +2667            count = self._parse_number()
      +2668            percent = self._match(TokenType.PERCENT)
       2669
      -2670            if only and with_ties:
      -2671                self.raise_error("Cannot specify both ONLY and WITH TIES in FETCH clause")
      -2672
      -2673            return self.expression(
      -2674                exp.Fetch,
      -2675                direction=direction,
      -2676                count=count,
      -2677                percent=percent,
      -2678                with_ties=with_ties,
      -2679            )
      -2680
      -2681        return this
      -2682
      -2683    def _parse_offset(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
      -2684        if not self._match_set((TokenType.OFFSET, TokenType.COMMA)):
      -2685            return this
      -2686
      -2687        count = self._parse_number()
      -2688        self._match_set((TokenType.ROW, TokenType.ROWS))
      -2689        return self.expression(exp.Offset, this=this, expression=count)
      -2690
      -2691    def _parse_locks(self) -> t.List[exp.Expression]:
      -2692        # Lists are invariant, so we need to use a type hint here
      -2693        locks: t.List[exp.Expression] = []
      -2694
      -2695        while True:
      -2696            if self._match_text_seq("FOR", "UPDATE"):
      -2697                update = True
      -2698            elif self._match_text_seq("FOR", "SHARE") or self._match_text_seq(
      -2699                "LOCK", "IN", "SHARE", "MODE"
      -2700            ):
      -2701                update = False
      -2702            else:
      -2703                break
      -2704
      -2705            expressions = None
      -2706            if self._match_text_seq("OF"):
      -2707                expressions = self._parse_csv(lambda: self._parse_table(schema=True))
      -2708
      -2709            wait: t.Optional[bool | exp.Expression] = None
      -2710            if self._match_text_seq("NOWAIT"):
      -2711                wait = True
      -2712            elif self._match_text_seq("WAIT"):
      -2713                wait = self._parse_primary()
      -2714            elif self._match_text_seq("SKIP", "LOCKED"):
      -2715                wait = False
      -2716
      -2717            locks.append(
      -2718                self.expression(exp.Lock, update=update, expressions=expressions, wait=wait)
      -2719            )
      -2720
      -2721        return locks
      -2722
      -2723    def _parse_set_operations(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      -2724        if not self._match_set(self.SET_OPERATIONS):
      -2725            return this
      -2726
      -2727        token_type = self._prev.token_type
      -2728
      -2729        if token_type == TokenType.UNION:
      -2730            expression = exp.Union
      -2731        elif token_type == TokenType.EXCEPT:
      -2732            expression = exp.Except
      -2733        else:
      -2734            expression = exp.Intersect
      -2735
      -2736        return self.expression(
      -2737            expression,
      -2738            this=this,
      -2739            distinct=self._match(TokenType.DISTINCT) or not self._match(TokenType.ALL),
      -2740            expression=self._parse_set_operations(self._parse_select(nested=True)),
      -2741        )
      -2742
      -2743    def _parse_expression(self) -> t.Optional[exp.Expression]:
      -2744        return self._parse_alias(self._parse_conjunction())
      +2670            self._match_set((TokenType.ROW, TokenType.ROWS))
      +2671
      +2672            only = self._match_text_seq("ONLY")
      +2673            with_ties = self._match_text_seq("WITH", "TIES")
      +2674
      +2675            if only and with_ties:
      +2676                self.raise_error("Cannot specify both ONLY and WITH TIES in FETCH clause")
      +2677
      +2678            return self.expression(
      +2679                exp.Fetch,
      +2680                direction=direction,
      +2681                count=count,
      +2682                percent=percent,
      +2683                with_ties=with_ties,
      +2684            )
      +2685
      +2686        return this
      +2687
      +2688    def _parse_offset(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
      +2689        if not self._match(TokenType.OFFSET):
      +2690            return this
      +2691
      +2692        count = self._parse_number()
      +2693        self._match_set((TokenType.ROW, TokenType.ROWS))
      +2694        return self.expression(exp.Offset, this=this, expression=count)
      +2695
      +2696    def _parse_locks(self) -> t.List[exp.Lock]:
      +2697        locks = []
      +2698        while True:
      +2699            if self._match_text_seq("FOR", "UPDATE"):
      +2700                update = True
      +2701            elif self._match_text_seq("FOR", "SHARE") or self._match_text_seq(
      +2702                "LOCK", "IN", "SHARE", "MODE"
      +2703            ):
      +2704                update = False
      +2705            else:
      +2706                break
      +2707
      +2708            expressions = None
      +2709            if self._match_text_seq("OF"):
      +2710                expressions = self._parse_csv(lambda: self._parse_table(schema=True))
      +2711
      +2712            wait: t.Optional[bool | exp.Expression] = None
      +2713            if self._match_text_seq("NOWAIT"):
      +2714                wait = True
      +2715            elif self._match_text_seq("WAIT"):
      +2716                wait = self._parse_primary()
      +2717            elif self._match_text_seq("SKIP", "LOCKED"):
      +2718                wait = False
      +2719
      +2720            locks.append(
      +2721                self.expression(exp.Lock, update=update, expressions=expressions, wait=wait)
      +2722            )
      +2723
      +2724        return locks
      +2725
      +2726    def _parse_set_operations(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      +2727        if not self._match_set(self.SET_OPERATIONS):
      +2728            return this
      +2729
      +2730        token_type = self._prev.token_type
      +2731
      +2732        if token_type == TokenType.UNION:
      +2733            expression = exp.Union
      +2734        elif token_type == TokenType.EXCEPT:
      +2735            expression = exp.Except
      +2736        else:
      +2737            expression = exp.Intersect
      +2738
      +2739        return self.expression(
      +2740            expression,
      +2741            this=this,
      +2742            distinct=self._match(TokenType.DISTINCT) or not self._match(TokenType.ALL),
      +2743            expression=self._parse_set_operations(self._parse_select(nested=True)),
      +2744        )
       2745
      -2746    def _parse_conjunction(self) -> t.Optional[exp.Expression]:
      -2747        return self._parse_tokens(self._parse_equality, self.CONJUNCTION)
      +2746    def _parse_expression(self) -> t.Optional[exp.Expression]:
      +2747        return self._parse_alias(self._parse_conjunction())
       2748
      -2749    def _parse_equality(self) -> t.Optional[exp.Expression]:
      -2750        return self._parse_tokens(self._parse_comparison, self.EQUALITY)
      +2749    def _parse_conjunction(self) -> t.Optional[exp.Expression]:
      +2750        return self._parse_tokens(self._parse_equality, self.CONJUNCTION)
       2751
      -2752    def _parse_comparison(self) -> t.Optional[exp.Expression]:
      -2753        return self._parse_tokens(self._parse_range, self.COMPARISON)
      +2752    def _parse_equality(self) -> t.Optional[exp.Expression]:
      +2753        return self._parse_tokens(self._parse_comparison, self.EQUALITY)
       2754
      -2755    def _parse_range(self) -> t.Optional[exp.Expression]:
      -2756        this = self._parse_bitwise()
      -2757        negate = self._match(TokenType.NOT)
      -2758
      -2759        if self._match_set(self.RANGE_PARSERS):
      -2760            expression = self.RANGE_PARSERS[self._prev.token_type](self, this)
      -2761            if not expression:
      -2762                return this
      -2763
      -2764            this = expression
      -2765        elif self._match(TokenType.ISNULL):
      -2766            this = self.expression(exp.Is, this=this, expression=exp.Null())
      -2767
      -2768        # Postgres supports ISNULL and NOTNULL for conditions.
      -2769        # https://blog.andreiavram.ro/postgresql-null-composite-type/
      -2770        if self._match(TokenType.NOTNULL):
      -2771            this = self.expression(exp.Is, this=this, expression=exp.Null())
      -2772            this = self.expression(exp.Not, this=this)
      -2773
      -2774        if negate:
      +2755    def _parse_comparison(self) -> t.Optional[exp.Expression]:
      +2756        return self._parse_tokens(self._parse_range, self.COMPARISON)
      +2757
      +2758    def _parse_range(self) -> t.Optional[exp.Expression]:
      +2759        this = self._parse_bitwise()
      +2760        negate = self._match(TokenType.NOT)
      +2761
      +2762        if self._match_set(self.RANGE_PARSERS):
      +2763            expression = self.RANGE_PARSERS[self._prev.token_type](self, this)
      +2764            if not expression:
      +2765                return this
      +2766
      +2767            this = expression
      +2768        elif self._match(TokenType.ISNULL):
      +2769            this = self.expression(exp.Is, this=this, expression=exp.Null())
      +2770
      +2771        # Postgres supports ISNULL and NOTNULL for conditions.
      +2772        # https://blog.andreiavram.ro/postgresql-null-composite-type/
      +2773        if self._match(TokenType.NOTNULL):
      +2774            this = self.expression(exp.Is, this=this, expression=exp.Null())
       2775            this = self.expression(exp.Not, this=this)
       2776
      -2777        if self._match(TokenType.IS):
      -2778            this = self._parse_is(this)
      +2777        if negate:
      +2778            this = self.expression(exp.Not, this=this)
       2779
      -2780        return this
      -2781
      -2782    def _parse_is(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      -2783        index = self._index - 1
      -2784        negate = self._match(TokenType.NOT)
      -2785        if self._match_text_seq("DISTINCT", "FROM"):
      -2786            klass = exp.NullSafeEQ if negate else exp.NullSafeNEQ
      -2787            return self.expression(klass, this=this, expression=self._parse_expression())
      +2780        if self._match(TokenType.IS):
      +2781            this = self._parse_is(this)
      +2782
      +2783        return this
      +2784
      +2785    def _parse_is(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      +2786        index = self._index - 1
      +2787        negate = self._match(TokenType.NOT)
       2788
      -2789        expression = self._parse_null() or self._parse_boolean()
      -2790        if not expression:
      -2791            self._retreat(index)
      -2792            return None
      -2793
      -2794        this = self.expression(exp.Is, this=this, expression=expression)
      -2795        return self.expression(exp.Not, this=this) if negate else this
      -2796
      -2797    def _parse_in(self, this: t.Optional[exp.Expression], alias: bool = False) -> exp.In:
      -2798        unnest = self._parse_unnest()
      -2799        if unnest:
      -2800            this = self.expression(exp.In, this=this, unnest=unnest)
      -2801        elif self._match(TokenType.L_PAREN):
      -2802            expressions = self._parse_csv(lambda: self._parse_select_or_expression(alias=alias))
      -2803
      -2804            if len(expressions) == 1 and isinstance(expressions[0], exp.Subqueryable):
      -2805                this = self.expression(exp.In, this=this, query=expressions[0])
      -2806            else:
      -2807                this = self.expression(exp.In, this=this, expressions=expressions)
      -2808
      -2809            self._match_r_paren(this)
      -2810        else:
      -2811            this = self.expression(exp.In, this=this, field=self._parse_field())
      +2789        if self._match_text_seq("DISTINCT", "FROM"):
      +2790            klass = exp.NullSafeEQ if negate else exp.NullSafeNEQ
      +2791            return self.expression(klass, this=this, expression=self._parse_expression())
      +2792
      +2793        expression = self._parse_null() or self._parse_boolean()
      +2794        if not expression:
      +2795            self._retreat(index)
      +2796            return None
      +2797
      +2798        this = self.expression(exp.Is, this=this, expression=expression)
      +2799        return self.expression(exp.Not, this=this) if negate else this
      +2800
      +2801    def _parse_in(self, this: t.Optional[exp.Expression], alias: bool = False) -> exp.In:
      +2802        unnest = self._parse_unnest(with_alias=False)
      +2803        if unnest:
      +2804            this = self.expression(exp.In, this=this, unnest=unnest)
      +2805        elif self._match(TokenType.L_PAREN):
      +2806            expressions = self._parse_csv(lambda: self._parse_select_or_expression(alias=alias))
      +2807
      +2808            if len(expressions) == 1 and isinstance(expressions[0], exp.Subqueryable):
      +2809                this = self.expression(exp.In, this=this, query=expressions[0])
      +2810            else:
      +2811                this = self.expression(exp.In, this=this, expressions=expressions)
       2812
      -2813        return this
      -2814
      -2815    def _parse_between(self, this: exp.Expression) -> exp.Expression:
      -2816        low = self._parse_bitwise()
      -2817        self._match(TokenType.AND)
      -2818        high = self._parse_bitwise()
      -2819        return self.expression(exp.Between, this=this, low=low, high=high)
      -2820
      -2821    def _parse_escape(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      -2822        if not self._match(TokenType.ESCAPE):
      -2823            return this
      -2824        return self.expression(exp.Escape, this=this, expression=self._parse_string())
      -2825
      -2826    def _parse_interval(self) -> t.Optional[exp.Expression]:
      -2827        if not self._match(TokenType.INTERVAL):
      -2828            return None
      +2813            self._match_r_paren(this)
      +2814        else:
      +2815            this = self.expression(exp.In, this=this, field=self._parse_field())
      +2816
      +2817        return this
      +2818
      +2819    def _parse_between(self, this: exp.Expression) -> exp.Between:
      +2820        low = self._parse_bitwise()
      +2821        self._match(TokenType.AND)
      +2822        high = self._parse_bitwise()
      +2823        return self.expression(exp.Between, this=this, low=low, high=high)
      +2824
      +2825    def _parse_escape(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      +2826        if not self._match(TokenType.ESCAPE):
      +2827            return this
      +2828        return self.expression(exp.Escape, this=this, expression=self._parse_string())
       2829
      -2830        this = self._parse_primary() or self._parse_term()
      -2831        unit = self._parse_function() or self._parse_var()
      -2832
      -2833        # Most dialects support, e.g., the form INTERVAL '5' day, thus we try to parse
      -2834        # each INTERVAL expression into this canonical form so it's easy to transpile
      -2835        if this and this.is_number:
      -2836            this = exp.Literal.string(this.name)
      -2837        elif this and this.is_string:
      -2838            parts = this.name.split()
      -2839
      -2840            if len(parts) == 2:
      -2841                if unit:
      -2842                    # this is not actually a unit, it's something else
      -2843                    unit = None
      -2844                    self._retreat(self._index - 1)
      -2845                else:
      -2846                    this = exp.Literal.string(parts[0])
      -2847                    unit = self.expression(exp.Var, this=parts[1])
      -2848
      -2849        return self.expression(exp.Interval, this=this, unit=unit)
      -2850
      -2851    def _parse_bitwise(self) -> t.Optional[exp.Expression]:
      -2852        this = self._parse_term()
      -2853
      -2854        while True:
      -2855            if self._match_set(self.BITWISE):
      -2856                this = self.expression(
      -2857                    self.BITWISE[self._prev.token_type],
      -2858                    this=this,
      -2859                    expression=self._parse_term(),
      -2860                )
      -2861            elif self._match_pair(TokenType.LT, TokenType.LT):
      -2862                this = self.expression(
      -2863                    exp.BitwiseLeftShift, this=this, expression=self._parse_term()
      -2864                )
      -2865            elif self._match_pair(TokenType.GT, TokenType.GT):
      -2866                this = self.expression(
      -2867                    exp.BitwiseRightShift, this=this, expression=self._parse_term()
      -2868                )
      -2869            else:
      -2870                break
      -2871
      -2872        return this
      +2830    def _parse_interval(self) -> t.Optional[exp.Interval]:
      +2831        if not self._match(TokenType.INTERVAL):
      +2832            return None
      +2833
      +2834        this = self._parse_primary() or self._parse_term()
      +2835        unit = self._parse_function() or self._parse_var()
      +2836
      +2837        # Most dialects support, e.g., the form INTERVAL '5' day, thus we try to parse
      +2838        # each INTERVAL expression into this canonical form so it's easy to transpile
      +2839        if this and this.is_number:
      +2840            this = exp.Literal.string(this.name)
      +2841        elif this and this.is_string:
      +2842            parts = this.name.split()
      +2843
      +2844            if len(parts) == 2:
      +2845                if unit:
      +2846                    # this is not actually a unit, it's something else
      +2847                    unit = None
      +2848                    self._retreat(self._index - 1)
      +2849                else:
      +2850                    this = exp.Literal.string(parts[0])
      +2851                    unit = self.expression(exp.Var, this=parts[1])
      +2852
      +2853        return self.expression(exp.Interval, this=this, unit=unit)
      +2854
      +2855    def _parse_bitwise(self) -> t.Optional[exp.Expression]:
      +2856        this = self._parse_term()
      +2857
      +2858        while True:
      +2859            if self._match_set(self.BITWISE):
      +2860                this = self.expression(
      +2861                    self.BITWISE[self._prev.token_type], this=this, expression=self._parse_term()
      +2862                )
      +2863            elif self._match_pair(TokenType.LT, TokenType.LT):
      +2864                this = self.expression(
      +2865                    exp.BitwiseLeftShift, this=this, expression=self._parse_term()
      +2866                )
      +2867            elif self._match_pair(TokenType.GT, TokenType.GT):
      +2868                this = self.expression(
      +2869                    exp.BitwiseRightShift, this=this, expression=self._parse_term()
      +2870                )
      +2871            else:
      +2872                break
       2873
      -2874    def _parse_term(self) -> t.Optional[exp.Expression]:
      -2875        return self._parse_tokens(self._parse_factor, self.TERM)
      -2876
      -2877    def _parse_factor(self) -> t.Optional[exp.Expression]:
      -2878        return self._parse_tokens(self._parse_unary, self.FACTOR)
      -2879
      -2880    def _parse_unary(self) -> t.Optional[exp.Expression]:
      -2881        if self._match_set(self.UNARY_PARSERS):
      -2882            return self.UNARY_PARSERS[self._prev.token_type](self)
      -2883        return self._parse_at_time_zone(self._parse_type())
      -2884
      -2885    def _parse_type(self) -> t.Optional[exp.Expression]:
      -2886        interval = self._parse_interval()
      -2887        if interval:
      -2888            return interval
      -2889
      -2890        index = self._index
      -2891        data_type = self._parse_types(check_func=True)
      -2892        this = self._parse_column()
      -2893
      -2894        if data_type:
      -2895            if isinstance(this, exp.Literal):
      -2896                parser = self.TYPE_LITERAL_PARSERS.get(data_type.this)
      -2897                if parser:
      -2898                    return parser(self, this, data_type)
      -2899                return self.expression(exp.Cast, this=this, to=data_type)
      -2900            if not data_type.expressions:
      -2901                self._retreat(index)
      -2902                return self._parse_column()
      -2903            return self._parse_column_ops(data_type)
      -2904
      -2905        return this
      +2874        return this
      +2875
      +2876    def _parse_term(self) -> t.Optional[exp.Expression]:
      +2877        return self._parse_tokens(self._parse_factor, self.TERM)
      +2878
      +2879    def _parse_factor(self) -> t.Optional[exp.Expression]:
      +2880        return self._parse_tokens(self._parse_unary, self.FACTOR)
      +2881
      +2882    def _parse_unary(self) -> t.Optional[exp.Expression]:
      +2883        if self._match_set(self.UNARY_PARSERS):
      +2884            return self.UNARY_PARSERS[self._prev.token_type](self)
      +2885        return self._parse_at_time_zone(self._parse_type())
      +2886
      +2887    def _parse_type(self) -> t.Optional[exp.Expression]:
      +2888        interval = self._parse_interval()
      +2889        if interval:
      +2890            return interval
      +2891
      +2892        index = self._index
      +2893        data_type = self._parse_types(check_func=True)
      +2894        this = self._parse_column()
      +2895
      +2896        if data_type:
      +2897            if isinstance(this, exp.Literal):
      +2898                parser = self.TYPE_LITERAL_PARSERS.get(data_type.this)
      +2899                if parser:
      +2900                    return parser(self, this, data_type)
      +2901                return self.expression(exp.Cast, this=this, to=data_type)
      +2902            if not data_type.expressions:
      +2903                self._retreat(index)
      +2904                return self._parse_column()
      +2905            return self._parse_column_ops(data_type)
       2906
      -2907    def _parse_type_size(self) -> t.Optional[exp.Expression]:
      -2908        this = self._parse_type()
      -2909        if not this:
      -2910            return None
      -2911
      -2912        return self.expression(
      -2913            exp.DataTypeSize, this=this, expression=self._parse_var(any_token=True)
      -2914        )
      -2915
      -2916    def _parse_types(
      -2917        self, check_func: bool = False, schema: bool = False
      -2918    ) -> t.Optional[exp.Expression]:
      -2919        index = self._index
      -2920
      -2921        prefix = self._match_text_seq("SYSUDTLIB", ".")
      +2907        return this
      +2908
      +2909    def _parse_type_size(self) -> t.Optional[exp.DataTypeSize]:
      +2910        this = self._parse_type()
      +2911        if not this:
      +2912            return None
      +2913
      +2914        return self.expression(
      +2915            exp.DataTypeSize, this=this, expression=self._parse_var(any_token=True)
      +2916        )
      +2917
      +2918    def _parse_types(
      +2919        self, check_func: bool = False, schema: bool = False
      +2920    ) -> t.Optional[exp.Expression]:
      +2921        index = self._index
       2922
      -2923        if not self._match_set(self.TYPE_TOKENS):
      -2924            return None
      -2925
      -2926        type_token = self._prev.token_type
      +2923        prefix = self._match_text_seq("SYSUDTLIB", ".")
      +2924
      +2925        if not self._match_set(self.TYPE_TOKENS):
      +2926            return None
       2927
      -2928        if type_token == TokenType.PSEUDO_TYPE:
      -2929            return self.expression(exp.PseudoType, this=self._prev.text)
      -2930
      -2931        nested = type_token in self.NESTED_TYPE_TOKENS
      -2932        is_struct = type_token == TokenType.STRUCT
      -2933        expressions = None
      -2934        maybe_func = False
      -2935
      -2936        if self._match(TokenType.L_PAREN):
      -2937            if is_struct:
      -2938                expressions = self._parse_csv(self._parse_struct_types)
      -2939            elif nested:
      -2940                expressions = self._parse_csv(
      -2941                    lambda: self._parse_types(check_func=check_func, schema=schema)
      -2942                )
      -2943            else:
      -2944                expressions = self._parse_csv(self._parse_type_size)
      -2945
      -2946            if not expressions or not self._match(TokenType.R_PAREN):
      -2947                self._retreat(index)
      -2948                return None
      +2928        type_token = self._prev.token_type
      +2929
      +2930        if type_token == TokenType.PSEUDO_TYPE:
      +2931            return self.expression(exp.PseudoType, this=self._prev.text)
      +2932
      +2933        nested = type_token in self.NESTED_TYPE_TOKENS
      +2934        is_struct = type_token == TokenType.STRUCT
      +2935        expressions = None
      +2936        maybe_func = False
      +2937
      +2938        if self._match(TokenType.L_PAREN):
      +2939            if is_struct:
      +2940                expressions = self._parse_csv(self._parse_struct_types)
      +2941            elif nested:
      +2942                expressions = self._parse_csv(
      +2943                    lambda: self._parse_types(check_func=check_func, schema=schema)
      +2944                )
      +2945            elif type_token in self.ENUM_TYPE_TOKENS:
      +2946                expressions = self._parse_csv(self._parse_primary)
      +2947            else:
      +2948                expressions = self._parse_csv(self._parse_type_size)
       2949
      -2950            maybe_func = True
      -2951
      -2952        if self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
      -2953            this = exp.DataType(
      -2954                this=exp.DataType.Type.ARRAY,
      -2955                expressions=[exp.DataType.build(type_token.value, expressions=expressions)],
      -2956                nested=True,
      -2957            )
      -2958
      -2959            while self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
      -2960                this = exp.DataType(
      -2961                    this=exp.DataType.Type.ARRAY,
      -2962                    expressions=[this],
      -2963                    nested=True,
      -2964                )
      +2950            if not expressions or not self._match(TokenType.R_PAREN):
      +2951                self._retreat(index)
      +2952                return None
      +2953
      +2954            maybe_func = True
      +2955
      +2956        if self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
      +2957            this = exp.DataType(
      +2958                this=exp.DataType.Type.ARRAY,
      +2959                expressions=[exp.DataType.build(type_token.value, expressions=expressions)],
      +2960                nested=True,
      +2961            )
      +2962
      +2963            while self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
      +2964                this = exp.DataType(this=exp.DataType.Type.ARRAY, expressions=[this], nested=True)
       2965
       2966            return this
       2967
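
A minimal sketch of the constructs the rewritten LIMIT/FETCH/OFFSET and INTERVAL paths above accept, assuming only sqlglot's public parse_one()/Expression.sql() API; the normalizations noted in the comments follow the INTERVAL canonicalization comment in the hunk and are not asserted here:

    # Sketch only: assumes sqlglot's public parse_one()/sql()/find() API; the
    # comments describe the canonical forms targeted by the parser code above.
    import sqlglot
    from sqlglot import exp

    # INTERVAL 5 day is re-parsed toward the canonical quoted form INTERVAL '5' day
    print(sqlglot.parse_one("SELECT INTERVAL 5 day").sql())

    # MySQL-style LIMIT offset, count exercises the COMMA branch of _parse_limit
    print(sqlglot.parse_one("SELECT * FROM t LIMIT 5, 10", read="mysql").sql(dialect="mysql"))

    # FETCH FIRST ... WITH TIES is parsed into an exp.Fetch node
    # (combining ONLY with WITH TIES raises a parse error, per the check above)
    print(sqlglot.parse_one("SELECT * FROM t FETCH FIRST 3 ROWS WITH TIES").find(exp.Fetch))
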
      @@ -7783,1712 +7787,1710 @@
       2987
       2988        value: t.Optional[exp.Expression] = None
       2989        if type_token in self.TIMESTAMPS:
      -2990            if self._match_text_seq("WITH", "TIME", "ZONE") or type_token == TokenType.TIMESTAMPTZ:
      -2991                value = exp.DataType(this=exp.DataType.Type.TIMESTAMPTZ, expressions=expressions)
      -2992            elif (
      -2993                self._match_text_seq("WITH", "LOCAL", "TIME", "ZONE")
      -2994                or type_token == TokenType.TIMESTAMPLTZ
      -2995            ):
      -2996                value = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions)
      -2997            elif self._match_text_seq("WITHOUT", "TIME", "ZONE"):
      -2998                if type_token == TokenType.TIME:
      -2999                    value = exp.DataType(this=exp.DataType.Type.TIME, expressions=expressions)
      -3000                else:
      -3001                    value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions)
      -3002
      -3003            maybe_func = maybe_func and value is None
      -3004
      -3005            if value is None:
      -3006                value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions)
      -3007        elif type_token == TokenType.INTERVAL:
      -3008            unit = self._parse_var()
      +2990            if self._match_text_seq("WITH", "TIME", "ZONE"):
      +2991                maybe_func = False
      +2992                value = exp.DataType(this=exp.DataType.Type.TIMESTAMPTZ, expressions=expressions)
      +2993            elif self._match_text_seq("WITH", "LOCAL", "TIME", "ZONE"):
      +2994                maybe_func = False
      +2995                value = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions)
      +2996            elif self._match_text_seq("WITHOUT", "TIME", "ZONE"):
      +2997                maybe_func = False
      +2998        elif type_token == TokenType.INTERVAL:
      +2999            unit = self._parse_var()
      +3000
      +3001            if not unit:
      +3002                value = self.expression(exp.DataType, this=exp.DataType.Type.INTERVAL)
      +3003            else:
      +3004                value = self.expression(exp.Interval, unit=unit)
      +3005
      +3006        if maybe_func and check_func:
      +3007            index2 = self._index
      +3008            peek = self._parse_string()
       3009
      -3010            if not unit:
      -3011                value = self.expression(exp.DataType, this=exp.DataType.Type.INTERVAL)
      -3012            else:
      -3013                value = self.expression(exp.Interval, unit=unit)
      -3014
      -3015        if maybe_func and check_func:
      -3016            index2 = self._index
      -3017            peek = self._parse_string()
      +3010            if not peek:
      +3011                self._retreat(index)
      +3012                return None
      +3013
      +3014            self._retreat(index2)
      +3015
      +3016        if value:
      +3017            return value
       3018
      -3019            if not peek:
      -3020                self._retreat(index)
      -3021                return None
      -3022
      -3023            self._retreat(index2)
      -3024
      -3025        if value:
      -3026            return value
      -3027
      -3028        return exp.DataType(
      -3029            this=exp.DataType.Type[type_token.value.upper()],
      -3030            expressions=expressions,
      -3031            nested=nested,
      -3032            values=values,
      -3033            prefix=prefix,
      -3034        )
      -3035
      -3036    def _parse_struct_types(self) -> t.Optional[exp.Expression]:
      -3037        this = self._parse_type() or self._parse_id_var()
      -3038        self._match(TokenType.COLON)
      -3039        return self._parse_column_def(this)
      -3040
      -3041    def _parse_at_time_zone(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      -3042        if not self._match_text_seq("AT", "TIME", "ZONE"):
      -3043            return this
      -3044        return self.expression(exp.AtTimeZone, this=this, zone=self._parse_unary())
      -3045
      -3046    def _parse_column(self) -> t.Optional[exp.Expression]:
      -3047        this = self._parse_field()
      -3048        if isinstance(this, exp.Identifier):
      -3049            this = self.expression(exp.Column, this=this)
      -3050        elif not this:
      -3051            return self._parse_bracket(this)
      -3052        return self._parse_column_ops(this)
      -3053
      -3054    def _parse_column_ops(self, this: exp.Expression) -> exp.Expression:
      -3055        this = self._parse_bracket(this)
      -3056
      -3057        while self._match_set(self.COLUMN_OPERATORS):
      -3058            op_token = self._prev.token_type
      -3059            op = self.COLUMN_OPERATORS.get(op_token)
      -3060
      -3061            if op_token == TokenType.DCOLON:
      -3062                field = self._parse_types()
      -3063                if not field:
      -3064                    self.raise_error("Expected type")
      -3065            elif op and self._curr:
      -3066                self._advance()
      -3067                value = self._prev.text
      -3068                field = (
      -3069                    exp.Literal.number(value)
      -3070                    if self._prev.token_type == TokenType.NUMBER
      -3071                    else exp.Literal.string(value)
      -3072                )
      -3073            else:
      -3074                field = self._parse_field(anonymous_func=True)
      -3075
      -3076            if isinstance(field, exp.Func):
      -3077                # bigquery allows function calls like x.y.count(...)
      -3078                # SAFE.SUBSTR(...)
      -3079                # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#function_call_rules
      -3080                this = self._replace_columns_with_dots(this)
      -3081
      -3082            if op:
      -3083                this = op(self, this, field)
      -3084            elif isinstance(this, exp.Column) and not this.args.get("catalog"):
      -3085                this = self.expression(
      -3086                    exp.Column,
      -3087                    this=field,
      -3088                    table=this.this,
      -3089                    db=this.args.get("table"),
      -3090                    catalog=this.args.get("db"),
      -3091                )
      -3092            else:
      -3093                this = self.expression(exp.Dot, this=this, expression=field)
      -3094            this = self._parse_bracket(this)
      -3095        return this
      -3096
      -3097    def _parse_primary(self) -> t.Optional[exp.Expression]:
      -3098        if self._match_set(self.PRIMARY_PARSERS):
      -3099            token_type = self._prev.token_type
      -3100            primary = self.PRIMARY_PARSERS[token_type](self, self._prev)
      -3101
      -3102            if token_type == TokenType.STRING:
      -3103                expressions = [primary]
      -3104                while self._match(TokenType.STRING):
      -3105                    expressions.append(exp.Literal.string(self._prev.text))
      -3106                if len(expressions) > 1:
      -3107                    return self.expression(exp.Concat, expressions=expressions)
      -3108            return primary
      +3019        return exp.DataType(
      +3020            this=exp.DataType.Type[type_token.value.upper()],
      +3021            expressions=expressions,
      +3022            nested=nested,
      +3023            values=values,
      +3024            prefix=prefix,
      +3025        )
      +3026
      +3027    def _parse_struct_types(self) -> t.Optional[exp.Expression]:
      +3028        this = self._parse_type() or self._parse_id_var()
      +3029        self._match(TokenType.COLON)
      +3030        return self._parse_column_def(this)
      +3031
      +3032    def _parse_at_time_zone(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      +3033        if not self._match_text_seq("AT", "TIME", "ZONE"):
      +3034            return this
      +3035        return self.expression(exp.AtTimeZone, this=this, zone=self._parse_unary())
      +3036
      +3037    def _parse_column(self) -> t.Optional[exp.Expression]:
      +3038        this = self._parse_field()
      +3039        if isinstance(this, exp.Identifier):
      +3040            this = self.expression(exp.Column, this=this)
      +3041        elif not this:
      +3042            return self._parse_bracket(this)
      +3043        return self._parse_column_ops(this)
      +3044
      +3045    def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      +3046        this = self._parse_bracket(this)
      +3047
      +3048        while self._match_set(self.COLUMN_OPERATORS):
      +3049            op_token = self._prev.token_type
      +3050            op = self.COLUMN_OPERATORS.get(op_token)
      +3051
      +3052            if op_token == TokenType.DCOLON:
      +3053                field = self._parse_types()
      +3054                if not field:
      +3055                    self.raise_error("Expected type")
      +3056            elif op and self._curr:
      +3057                self._advance()
      +3058                value = self._prev.text
      +3059                field = (
      +3060                    exp.Literal.number(value)
      +3061                    if self._prev.token_type == TokenType.NUMBER
      +3062                    else exp.Literal.string(value)
      +3063                )
      +3064            else:
      +3065                field = self._parse_field(anonymous_func=True, any_token=True)
      +3066
      +3067            if isinstance(field, exp.Func):
      +3068                # bigquery allows function calls like x.y.count(...)
      +3069                # SAFE.SUBSTR(...)
      +3070                # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#function_call_rules
      +3071                this = self._replace_columns_with_dots(this)
      +3072
      +3073            if op:
      +3074                this = op(self, this, field)
      +3075            elif isinstance(this, exp.Column) and not this.args.get("catalog"):
      +3076                this = self.expression(
      +3077                    exp.Column,
      +3078                    this=field,
      +3079                    table=this.this,
      +3080                    db=this.args.get("table"),
      +3081                    catalog=this.args.get("db"),
      +3082                )
      +3083            else:
      +3084                this = self.expression(exp.Dot, this=this, expression=field)
      +3085            this = self._parse_bracket(this)
      +3086        return this
      +3087
      +3088    def _parse_primary(self) -> t.Optional[exp.Expression]:
      +3089        if self._match_set(self.PRIMARY_PARSERS):
      +3090            token_type = self._prev.token_type
      +3091            primary = self.PRIMARY_PARSERS[token_type](self, self._prev)
      +3092
      +3093            if token_type == TokenType.STRING:
      +3094                expressions = [primary]
      +3095                while self._match(TokenType.STRING):
      +3096                    expressions.append(exp.Literal.string(self._prev.text))
      +3097
      +3098                if len(expressions) > 1:
      +3099                    return self.expression(exp.Concat, expressions=expressions)
      +3100
      +3101            return primary
      +3102
      +3103        if self._match_pair(TokenType.DOT, TokenType.NUMBER):
      +3104            return exp.Literal.number(f"0.{self._prev.text}")
      +3105
      +3106        if self._match(TokenType.L_PAREN):
      +3107            comments = self._prev_comments
      +3108            query = self._parse_select()
       3109
      -3110        if self._match_pair(TokenType.DOT, TokenType.NUMBER):
      -3111            return exp.Literal.number(f"0.{self._prev.text}")
      -3112
      -3113        if self._match(TokenType.L_PAREN):
      -3114            comments = self._prev_comments
      -3115            query = self._parse_select()
      +3110            if query:
      +3111                expressions = [query]
      +3112            else:
      +3113                expressions = self._parse_csv(self._parse_expression)
      +3114
      +3115            this = self._parse_query_modifiers(seq_get(expressions, 0))
       3116
      -3117            if query:
      -3118                expressions = [query]
      -3119            else:
      -3120                expressions = self._parse_csv(self._parse_expression)
      -3121
      -3122            this = self._parse_query_modifiers(seq_get(expressions, 0))
      -3123
      -3124            if isinstance(this, exp.Subqueryable):
      -3125                this = self._parse_set_operations(
      -3126                    self._parse_subquery(this=this, parse_alias=False)
      -3127                )
      -3128            elif len(expressions) > 1:
      -3129                this = self.expression(exp.Tuple, expressions=expressions)
      -3130            else:
      -3131                this = self.expression(exp.Paren, this=self._parse_set_operations(this))
      -3132
      -3133            if this:
      -3134                this.add_comments(comments)
      -3135            self._match_r_paren(expression=this)
      -3136
      -3137            return this
      -3138
      -3139        return None
      -3140
      -3141    def _parse_field(
      -3142        self,
      -3143        any_token: bool = False,
      -3144        tokens: t.Optional[t.Collection[TokenType]] = None,
      -3145        anonymous_func: bool = False,
      -3146    ) -> t.Optional[exp.Expression]:
      -3147        return (
      -3148            self._parse_primary()
      -3149            or self._parse_function(anonymous=anonymous_func)
      -3150            or self._parse_id_var(any_token=any_token, tokens=tokens)
      -3151        )
      -3152
      -3153    def _parse_function(
      -3154        self, functions: t.Optional[t.Dict[str, t.Callable]] = None, anonymous: bool = False
      -3155    ) -> t.Optional[exp.Expression]:
      -3156        if not self._curr:
      -3157            return None
      -3158
      -3159        token_type = self._curr.token_type
      -3160
      -3161        if self._match_set(self.NO_PAREN_FUNCTION_PARSERS):
      -3162            return self.NO_PAREN_FUNCTION_PARSERS[token_type](self)
      -3163
      -3164        if not self._next or self._next.token_type != TokenType.L_PAREN:
      -3165            if token_type in self.NO_PAREN_FUNCTIONS:
      -3166                self._advance()
      -3167                return self.expression(self.NO_PAREN_FUNCTIONS[token_type])
      -3168
      -3169            return None
      -3170
      -3171        if token_type not in self.FUNC_TOKENS:
      -3172            return None
      +3117            if isinstance(this, exp.Subqueryable):
      +3118                this = self._parse_set_operations(
      +3119                    self._parse_subquery(this=this, parse_alias=False)
      +3120                )
      +3121            elif len(expressions) > 1:
      +3122                this = self.expression(exp.Tuple, expressions=expressions)
      +3123            else:
      +3124                this = self.expression(exp.Paren, this=self._parse_set_operations(this))
      +3125
      +3126            if this:
      +3127                this.add_comments(comments)
      +3128
      +3129            self._match_r_paren(expression=this)
      +3130            return this
      +3131
      +3132        return None
      +3133
      +3134    def _parse_field(
      +3135        self,
      +3136        any_token: bool = False,
      +3137        tokens: t.Optional[t.Collection[TokenType]] = None,
      +3138        anonymous_func: bool = False,
      +3139    ) -> t.Optional[exp.Expression]:
      +3140        return (
      +3141            self._parse_primary()
      +3142            or self._parse_function(anonymous=anonymous_func)
      +3143            or self._parse_id_var(any_token=any_token, tokens=tokens)
      +3144        )
      +3145
      +3146    def _parse_function(
      +3147        self,
      +3148        functions: t.Optional[t.Dict[str, t.Callable]] = None,
      +3149        anonymous: bool = False,
      +3150        optional_parens: bool = True,
      +3151    ) -> t.Optional[exp.Expression]:
      +3152        if not self._curr:
      +3153            return None
      +3154
      +3155        token_type = self._curr.token_type
      +3156
      +3157        if optional_parens and self._match_set(self.NO_PAREN_FUNCTION_PARSERS):
      +3158            return self.NO_PAREN_FUNCTION_PARSERS[token_type](self)
      +3159
      +3160        if not self._next or self._next.token_type != TokenType.L_PAREN:
      +3161            if optional_parens and token_type in self.NO_PAREN_FUNCTIONS:
      +3162                self._advance()
      +3163                return self.expression(self.NO_PAREN_FUNCTIONS[token_type])
      +3164
      +3165            return None
      +3166
      +3167        if token_type not in self.FUNC_TOKENS:
      +3168            return None
      +3169
      +3170        this = self._curr.text
      +3171        upper = this.upper()
      +3172        self._advance(2)
       3173
      -3174        this = self._curr.text
      -3175        upper = this.upper()
      -3176        self._advance(2)
      -3177
      -3178        parser = self.FUNCTION_PARSERS.get(upper)
      -3179
      -3180        if parser and not anonymous:
      -3181            this = parser(self)
      -3182        else:
      -3183            subquery_predicate = self.SUBQUERY_PREDICATES.get(token_type)
      -3184
      -3185            if subquery_predicate and self._curr.token_type in (TokenType.SELECT, TokenType.WITH):
      -3186                this = self.expression(subquery_predicate, this=self._parse_select())
      -3187                self._match_r_paren()
      -3188                return this
      -3189
      -3190            if functions is None:
      -3191                functions = self.FUNCTIONS
      -3192
      -3193            function = functions.get(upper)
      -3194
      -3195            alias = upper in self.FUNCTIONS_WITH_ALIASED_ARGS
      -3196            args = self._parse_csv(lambda: self._parse_lambda(alias=alias))
      -3197
      -3198            if function and not anonymous:
      -3199                this = function(args)
      -3200                self.validate_expression(this, args)
      -3201            else:
      -3202                this = self.expression(exp.Anonymous, this=this, expressions=args)
      -3203
      -3204        self._match_r_paren(this)
      -3205        return self._parse_window(this)
      -3206
      -3207    def _parse_function_parameter(self) -> t.Optional[exp.Expression]:
      -3208        return self._parse_column_def(self._parse_id_var())
      +3174        parser = self.FUNCTION_PARSERS.get(upper)
      +3175
      +3176        if parser and not anonymous:
      +3177            this = parser(self)
      +3178        else:
      +3179            subquery_predicate = self.SUBQUERY_PREDICATES.get(token_type)
      +3180
      +3181            if subquery_predicate and self._curr.token_type in (TokenType.SELECT, TokenType.WITH):
      +3182                this = self.expression(subquery_predicate, this=self._parse_select())
      +3183                self._match_r_paren()
      +3184                return this
      +3185
      +3186            if functions is None:
      +3187                functions = self.FUNCTIONS
      +3188
      +3189            function = functions.get(upper)
      +3190
      +3191            alias = upper in self.FUNCTIONS_WITH_ALIASED_ARGS
      +3192            args = self._parse_csv(lambda: self._parse_lambda(alias=alias))
      +3193
      +3194            if function and not anonymous:
      +3195                this = self.validate_expression(function(args), args)
      +3196            else:
      +3197                this = self.expression(exp.Anonymous, this=this, expressions=args)
      +3198
      +3199        self._match_r_paren(this)
      +3200        return self._parse_window(this)
      +3201
      +3202    def _parse_function_parameter(self) -> t.Optional[exp.Expression]:
      +3203        return self._parse_column_def(self._parse_id_var())
      +3204
      +3205    def _parse_user_defined_function(
      +3206        self, kind: t.Optional[TokenType] = None
      +3207    ) -> t.Optional[exp.Expression]:
      +3208        this = self._parse_id_var()
       3209
      -3210    def _parse_user_defined_function(
      -3211        self, kind: t.Optional[TokenType] = None
      -3212    ) -> t.Optional[exp.Expression]:
      -3213        this = self._parse_id_var()
      -3214
      -3215        while self._match(TokenType.DOT):
      -3216            this = self.expression(exp.Dot, this=this, expression=self._parse_id_var())
      -3217
      -3218        if not self._match(TokenType.L_PAREN):
      -3219            return this
      -3220
      -3221        expressions = self._parse_csv(self._parse_function_parameter)
      -3222        self._match_r_paren()
      -3223        return self.expression(
      -3224            exp.UserDefinedFunction, this=this, expressions=expressions, wrapped=True
      -3225        )
      +3210        while self._match(TokenType.DOT):
      +3211            this = self.expression(exp.Dot, this=this, expression=self._parse_id_var())
      +3212
      +3213        if not self._match(TokenType.L_PAREN):
      +3214            return this
      +3215
      +3216        expressions = self._parse_csv(self._parse_function_parameter)
      +3217        self._match_r_paren()
      +3218        return self.expression(
      +3219            exp.UserDefinedFunction, this=this, expressions=expressions, wrapped=True
      +3220        )
      +3221
      +3222    def _parse_introducer(self, token: Token) -> exp.Introducer | exp.Identifier:
      +3223        literal = self._parse_primary()
      +3224        if literal:
      +3225            return self.expression(exp.Introducer, this=token.text, expression=literal)
       3226
      -3227    def _parse_introducer(self, token: Token) -> t.Optional[exp.Expression]:
      -3228        literal = self._parse_primary()
      -3229        if literal:
      -3230            return self.expression(exp.Introducer, this=token.text, expression=literal)
      -3231
      -3232        return self.expression(exp.Identifier, this=token.text)
      -3233
      -3234    def _parse_session_parameter(self) -> exp.Expression:
      -3235        kind = None
      -3236        this = self._parse_id_var() or self._parse_primary()
      -3237
      -3238        if this and self._match(TokenType.DOT):
      -3239            kind = this.name
      -3240            this = self._parse_var() or self._parse_primary()
      +3227        return self.expression(exp.Identifier, this=token.text)
      +3228
      +3229    def _parse_session_parameter(self) -> exp.SessionParameter:
      +3230        kind = None
      +3231        this = self._parse_id_var() or self._parse_primary()
      +3232
      +3233        if this and self._match(TokenType.DOT):
      +3234            kind = this.name
      +3235            this = self._parse_var() or self._parse_primary()
      +3236
      +3237        return self.expression(exp.SessionParameter, this=this, kind=kind)
      +3238
      +3239    def _parse_lambda(self, alias: bool = False) -> t.Optional[exp.Expression]:
      +3240        index = self._index
       3241
      -3242        return self.expression(exp.SessionParameter, this=this, kind=kind)
      -3243
      -3244    def _parse_lambda(self, alias: bool = False) -> t.Optional[exp.Expression]:
      -3245        index = self._index
      -3246
      -3247        if self._match(TokenType.L_PAREN):
      -3248            expressions = self._parse_csv(self._parse_id_var)
      +3242        if self._match(TokenType.L_PAREN):
      +3243            expressions = self._parse_csv(self._parse_id_var)
      +3244
      +3245            if not self._match(TokenType.R_PAREN):
      +3246                self._retreat(index)
      +3247        else:
      +3248            expressions = [self._parse_id_var()]
       3249
      -3250            if not self._match(TokenType.R_PAREN):
      -3251                self._retreat(index)
      -3252        else:
      -3253            expressions = [self._parse_id_var()]
      +3250        if self._match_set(self.LAMBDAS):
      +3251            return self.LAMBDAS[self._prev.token_type](self, expressions)
      +3252
      +3253        self._retreat(index)
       3254
      -3255        if self._match_set(self.LAMBDAS):
      -3256            return self.LAMBDAS[self._prev.token_type](self, expressions)
      -3257
      -3258        self._retreat(index)
      -3259
      -3260        this: t.Optional[exp.Expression]
      -3261
      -3262        if self._match(TokenType.DISTINCT):
      -3263            this = self.expression(
      -3264                exp.Distinct, expressions=self._parse_csv(self._parse_conjunction)
      -3265            )
      -3266        else:
      -3267            this = self._parse_select_or_expression(alias=alias)
      +3255        this: t.Optional[exp.Expression]
      +3256
      +3257        if self._match(TokenType.DISTINCT):
      +3258            this = self.expression(
      +3259                exp.Distinct, expressions=self._parse_csv(self._parse_conjunction)
      +3260            )
      +3261        else:
      +3262            this = self._parse_select_or_expression(alias=alias)
      +3263
      +3264            if isinstance(this, exp.EQ):
      +3265                left = this.this
      +3266                if isinstance(left, exp.Column):
      +3267                    left.replace(exp.var(left.text("this")))
       3268
      -3269            if isinstance(this, exp.EQ):
      -3270                left = this.this
      -3271                if isinstance(left, exp.Column):
      -3272                    left.replace(exp.Var(this=left.text("this")))
      +3269        return self._parse_limit(self._parse_order(self._parse_respect_or_ignore_nulls(this)))
      +3270
      +3271    def _parse_schema(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
      +3272        index = self._index
       3273
      -3274        return self._parse_limit(self._parse_order(self._parse_respect_or_ignore_nulls(this)))
      -3275
      -3276    def _parse_schema(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
      -3277        index = self._index
      -3278
      -3279        if not self.errors:
      -3280            try:
      -3281                if self._parse_select(nested=True):
      -3282                    return this
      -3283            except ParseError:
      -3284                pass
      -3285            finally:
      -3286                self.errors.clear()
      -3287                self._retreat(index)
      -3288
      -3289        if not self._match(TokenType.L_PAREN):
      -3290            return this
      +3274        if not self.errors:
      +3275            try:
      +3276                if self._parse_select(nested=True):
      +3277                    return this
      +3278            except ParseError:
      +3279                pass
      +3280            finally:
      +3281                self.errors.clear()
      +3282                self._retreat(index)
      +3283
      +3284        if not self._match(TokenType.L_PAREN):
      +3285            return this
      +3286
      +3287        args = self._parse_csv(
      +3288            lambda: self._parse_constraint()
      +3289            or self._parse_column_def(self._parse_field(any_token=True))
      +3290        )
       3291
      -3292        args = self._parse_csv(
      -3293            lambda: self._parse_constraint()
      -3294            or self._parse_column_def(self._parse_field(any_token=True))
      -3295        )
      -3296        self._match_r_paren()
      -3297        return self.expression(exp.Schema, this=this, expressions=args)
      -3298
      -3299    def _parse_column_def(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      -3300        # column defs are not really columns, they're identifiers
      -3301        if isinstance(this, exp.Column):
      -3302            this = this.this
      -3303        kind = self._parse_types(schema=True)
      +3292        self._match_r_paren()
      +3293        return self.expression(exp.Schema, this=this, expressions=args)
      +3294
      +3295    def _parse_column_def(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      +3296        # column defs are not really columns, they're identifiers
      +3297        if isinstance(this, exp.Column):
      +3298            this = this.this
      +3299
      +3300        kind = self._parse_types(schema=True)
      +3301
      +3302        if self._match_text_seq("FOR", "ORDINALITY"):
      +3303            return self.expression(exp.ColumnDef, this=this, ordinality=True)
       3304
      -3305        if self._match_text_seq("FOR", "ORDINALITY"):
      -3306            return self.expression(exp.ColumnDef, this=this, ordinality=True)
      -3307
      -3308        constraints = []
      -3309        while True:
      -3310            constraint = self._parse_column_constraint()
      -3311            if not constraint:
      -3312                break
      -3313            constraints.append(constraint)
      +3305        constraints = []
      +3306        while True:
      +3307            constraint = self._parse_column_constraint()
      +3308            if not constraint:
      +3309                break
      +3310            constraints.append(constraint)
      +3311
      +3312        if not kind and not constraints:
      +3313            return this
       3314
      -3315        if not kind and not constraints:
      -3316            return this
      -3317
      -3318        return self.expression(exp.ColumnDef, this=this, kind=kind, constraints=constraints)
      -3319
      -3320    def _parse_auto_increment(self) -> exp.Expression:
      -3321        start = None
      -3322        increment = None
      -3323
      -3324        if self._match(TokenType.L_PAREN, advance=False):
      -3325            args = self._parse_wrapped_csv(self._parse_bitwise)
      -3326            start = seq_get(args, 0)
      -3327            increment = seq_get(args, 1)
      -3328        elif self._match_text_seq("START"):
      -3329            start = self._parse_bitwise()
      -3330            self._match_text_seq("INCREMENT")
      -3331            increment = self._parse_bitwise()
      -3332
      -3333        if start and increment:
      -3334            return exp.GeneratedAsIdentityColumnConstraint(start=start, increment=increment)
      -3335
      -3336        return exp.AutoIncrementColumnConstraint()
      -3337
      -3338    def _parse_compress(self) -> exp.Expression:
      -3339        if self._match(TokenType.L_PAREN, advance=False):
      -3340            return self.expression(
      -3341                exp.CompressColumnConstraint, this=self._parse_wrapped_csv(self._parse_bitwise)
      -3342            )
      -3343
      -3344        return self.expression(exp.CompressColumnConstraint, this=self._parse_bitwise())
      -3345
      -3346    def _parse_generated_as_identity(self) -> exp.Expression:
      -3347        if self._match_text_seq("BY", "DEFAULT"):
      -3348            on_null = self._match_pair(TokenType.ON, TokenType.NULL)
      -3349            this = self.expression(
      -3350                exp.GeneratedAsIdentityColumnConstraint, this=False, on_null=on_null
      -3351            )
      -3352        else:
      -3353            self._match_text_seq("ALWAYS")
      -3354            this = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=True)
      -3355
      -3356        self._match(TokenType.ALIAS)
      -3357        identity = self._match_text_seq("IDENTITY")
      -3358
      -3359        if self._match(TokenType.L_PAREN):
      -3360            if self._match_text_seq("START", "WITH"):
      -3361                this.set("start", self._parse_bitwise())
      -3362            if self._match_text_seq("INCREMENT", "BY"):
      -3363                this.set("increment", self._parse_bitwise())
      -3364            if self._match_text_seq("MINVALUE"):
      -3365                this.set("minvalue", self._parse_bitwise())
      -3366            if self._match_text_seq("MAXVALUE"):
      -3367                this.set("maxvalue", self._parse_bitwise())
      -3368
      -3369            if self._match_text_seq("CYCLE"):
      -3370                this.set("cycle", True)
      -3371            elif self._match_text_seq("NO", "CYCLE"):
      -3372                this.set("cycle", False)
      -3373
      -3374            if not identity:
      -3375                this.set("expression", self._parse_bitwise())
      -3376
      -3377            self._match_r_paren()
      -3378
      -3379        return this
      -3380
      -3381    def _parse_inline(self) -> t.Optional[exp.Expression]:
      -3382        self._match_text_seq("LENGTH")
      -3383        return self.expression(exp.InlineLengthColumnConstraint, this=self._parse_bitwise())
      -3384
      -3385    def _parse_not_constraint(self) -> t.Optional[exp.Expression]:
      -3386        if self._match_text_seq("NULL"):
      -3387            return self.expression(exp.NotNullColumnConstraint)
      -3388        if self._match_text_seq("CASESPECIFIC"):
      -3389            return self.expression(exp.CaseSpecificColumnConstraint, not_=True)
      -3390        return None
      -3391
      -3392    def _parse_column_constraint(self) -> t.Optional[exp.Expression]:
      -3393        if self._match(TokenType.CONSTRAINT):
      -3394            this = self._parse_id_var()
      -3395        else:
      -3396            this = None
      -3397
      -3398        if self._match_texts(self.CONSTRAINT_PARSERS):
      -3399            return self.expression(
      -3400                exp.ColumnConstraint,
      -3401                this=this,
      -3402                kind=self.CONSTRAINT_PARSERS[self._prev.text.upper()](self),
      -3403            )
      -3404
      -3405        return this
      -3406
      -3407    def _parse_constraint(self) -> t.Optional[exp.Expression]:
      -3408        if not self._match(TokenType.CONSTRAINT):
      -3409            return self._parse_unnamed_constraint(constraints=self.SCHEMA_UNNAMED_CONSTRAINTS)
      -3410
      -3411        this = self._parse_id_var()
      -3412        expressions = []
      -3413
      -3414        while True:
      -3415            constraint = self._parse_unnamed_constraint() or self._parse_function()
      -3416            if not constraint:
      -3417                break
      -3418            expressions.append(constraint)
      -3419
      -3420        return self.expression(exp.Constraint, this=this, expressions=expressions)
      -3421
      -3422    def _parse_unnamed_constraint(
      -3423        self, constraints: t.Optional[t.Collection[str]] = None
      -3424    ) -> t.Optional[exp.Expression]:
      -3425        if not self._match_texts(constraints or self.CONSTRAINT_PARSERS):
      -3426            return None
      -3427
      -3428        constraint = self._prev.text.upper()
      -3429        if constraint not in self.CONSTRAINT_PARSERS:
      -3430            self.raise_error(f"No parser found for schema constraint {constraint}.")
      -3431
      -3432        return self.CONSTRAINT_PARSERS[constraint](self)
      -3433
      -3434    def _parse_unique(self) -> exp.Expression:
      -3435        self._match_text_seq("KEY")
      -3436        return self.expression(
      -3437            exp.UniqueColumnConstraint, this=self._parse_schema(self._parse_id_var(any_token=False))
      -3438        )
      -3439
      -3440    def _parse_key_constraint_options(self) -> t.List[str]:
      -3441        options = []
      -3442        while True:
      -3443            if not self._curr:
      -3444                break
      -3445
      -3446            if self._match(TokenType.ON):
      -3447                action = None
      -3448                on = self._advance_any() and self._prev.text
      -3449
      -3450                if self._match_text_seq("NO", "ACTION"):
      -3451                    action = "NO ACTION"
      -3452                elif self._match_text_seq("CASCADE"):
      -3453                    action = "CASCADE"
      -3454                elif self._match_pair(TokenType.SET, TokenType.NULL):
      -3455                    action = "SET NULL"
      -3456                elif self._match_pair(TokenType.SET, TokenType.DEFAULT):
      -3457                    action = "SET DEFAULT"
      -3458                else:
      -3459                    self.raise_error("Invalid key constraint")
      -3460
      -3461                options.append(f"ON {on} {action}")
      -3462            elif self._match_text_seq("NOT", "ENFORCED"):
      -3463                options.append("NOT ENFORCED")
      -3464            elif self._match_text_seq("DEFERRABLE"):
      -3465                options.append("DEFERRABLE")
      -3466            elif self._match_text_seq("INITIALLY", "DEFERRED"):
      -3467                options.append("INITIALLY DEFERRED")
      -3468            elif self._match_text_seq("NORELY"):
      -3469                options.append("NORELY")
      -3470            elif self._match_text_seq("MATCH", "FULL"):
      -3471                options.append("MATCH FULL")
      -3472            else:
      -3473                break
      -3474
      -3475        return options
      -3476
      -3477    def _parse_references(self, match: bool = True) -> t.Optional[exp.Expression]:
      -3478        if match and not self._match(TokenType.REFERENCES):
      -3479            return None
      -3480
      -3481        expressions = None
      -3482        this = self._parse_id_var()
      -3483
      -3484        if self._match(TokenType.L_PAREN, advance=False):
      -3485            expressions = self._parse_wrapped_id_vars()
      -3486
      -3487        options = self._parse_key_constraint_options()
      -3488        return self.expression(exp.Reference, this=this, expressions=expressions, options=options)
      -3489
      -3490    def _parse_foreign_key(self) -> exp.Expression:
      -3491        expressions = self._parse_wrapped_id_vars()
      -3492        reference = self._parse_references()
      -3493        options = {}
      -3494
      -3495        while self._match(TokenType.ON):
      -3496            if not self._match_set((TokenType.DELETE, TokenType.UPDATE)):
      -3497                self.raise_error("Expected DELETE or UPDATE")
      -3498
      -3499            kind = self._prev.text.lower()
      -3500
      -3501            if self._match_text_seq("NO", "ACTION"):
      -3502                action = "NO ACTION"
      -3503            elif self._match(TokenType.SET):
      -3504                self._match_set((TokenType.NULL, TokenType.DEFAULT))
      -3505                action = "SET " + self._prev.text.upper()
      -3506            else:
      -3507                self._advance()
      -3508                action = self._prev.text.upper()
      -3509
      -3510            options[kind] = action
      -3511
      -3512        return self.expression(
      -3513            exp.ForeignKey, expressions=expressions, reference=reference, **options  # type: ignore
      -3514        )
      -3515
      -3516    def _parse_primary_key(
      -3517        self, wrapped_optional: bool = False, in_props: bool = False
      -3518    ) -> exp.Expression:
      -3519        desc = (
      -3520            self._match_set((TokenType.ASC, TokenType.DESC))
      -3521            and self._prev.token_type == TokenType.DESC
      -3522        )
      -3523
      -3524        if not in_props and not self._match(TokenType.L_PAREN, advance=False):
      -3525            return self.expression(exp.PrimaryKeyColumnConstraint, desc=desc)
      -3526
      -3527        expressions = self._parse_wrapped_csv(self._parse_field, optional=wrapped_optional)
      -3528        options = self._parse_key_constraint_options()
      -3529        return self.expression(exp.PrimaryKey, expressions=expressions, options=options)
      -3530
      -3531    @t.overload
      -3532    def _parse_bracket(self, this: exp.Expression) -> exp.Expression:
      -3533        ...
      -3534
      -3535    @t.overload
      -3536    def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      -3537        ...
      -3538
      -3539    def _parse_bracket(self, this):
      -3540        if not self._match_set((TokenType.L_BRACKET, TokenType.L_BRACE)):
      -3541            return this
      -3542
      -3543        bracket_kind = self._prev.token_type
      -3544        expressions: t.List[t.Optional[exp.Expression]]
      -3545
      -3546        if self._match(TokenType.COLON):
      -3547            expressions = [self.expression(exp.Slice, expression=self._parse_conjunction())]
      -3548        else:
      -3549            expressions = self._parse_csv(lambda: self._parse_slice(self._parse_conjunction()))
      -3550
      -3551        # https://duckdb.org/docs/sql/data_types/struct.html#creating-structs
      -3552        if bracket_kind == TokenType.L_BRACE:
      -3553            this = self.expression(exp.Struct, expressions=expressions)
      -3554        elif not this or this.name.upper() == "ARRAY":
      -3555            this = self.expression(exp.Array, expressions=expressions)
      -3556        else:
      -3557            expressions = apply_index_offset(this, expressions, -self.index_offset)
      -3558            this = self.expression(exp.Bracket, this=this, expressions=expressions)
      -3559
      -3560        if not self._match(TokenType.R_BRACKET) and bracket_kind == TokenType.L_BRACKET:
      -3561            self.raise_error("Expected ]")
      -3562        elif not self._match(TokenType.R_BRACE) and bracket_kind == TokenType.L_BRACE:
      -3563            self.raise_error("Expected }")
      -3564
      -3565        self._add_comments(this)
      -3566        return self._parse_bracket(this)
      -3567
      -3568    def _parse_slice(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      -3569        if self._match(TokenType.COLON):
      -3570            return self.expression(exp.Slice, this=this, expression=self._parse_conjunction())
      -3571        return this
      +3315        return self.expression(exp.ColumnDef, this=this, kind=kind, constraints=constraints)
      +3316
      +3317    def _parse_auto_increment(
      +3318        self,
      +3319    ) -> exp.GeneratedAsIdentityColumnConstraint | exp.AutoIncrementColumnConstraint:
      +3320        start = None
      +3321        increment = None
      +3322
      +3323        if self._match(TokenType.L_PAREN, advance=False):
      +3324            args = self._parse_wrapped_csv(self._parse_bitwise)
      +3325            start = seq_get(args, 0)
      +3326            increment = seq_get(args, 1)
      +3327        elif self._match_text_seq("START"):
      +3328            start = self._parse_bitwise()
      +3329            self._match_text_seq("INCREMENT")
      +3330            increment = self._parse_bitwise()
      +3331
      +3332        if start and increment:
      +3333            return exp.GeneratedAsIdentityColumnConstraint(start=start, increment=increment)
      +3334
      +3335        return exp.AutoIncrementColumnConstraint()
      +3336
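# A minimal sketch of the auto-increment branch above, assuming IDENTITY-style
# constraints are routed here through CONSTRAINT_PARSERS; the T-SQL DDL is illustrative.
import sqlglot

ddl = "CREATE TABLE t (id INT IDENTITY(1, 1))"
column_def = sqlglot.parse_one(ddl, read="tsql").this.expressions[0]
# With both a start and an increment present, the constraint should come back as a
# GeneratedAsIdentityColumnConstraint rather than a bare AutoIncrementColumnConstraint.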
      +3337    def _parse_compress(self) -> exp.CompressColumnConstraint:
      +3338        if self._match(TokenType.L_PAREN, advance=False):
      +3339            return self.expression(
      +3340                exp.CompressColumnConstraint, this=self._parse_wrapped_csv(self._parse_bitwise)
      +3341            )
      +3342
      +3343        return self.expression(exp.CompressColumnConstraint, this=self._parse_bitwise())
      +3344
      +3345    def _parse_generated_as_identity(self) -> exp.GeneratedAsIdentityColumnConstraint:
      +3346        if self._match_text_seq("BY", "DEFAULT"):
      +3347            on_null = self._match_pair(TokenType.ON, TokenType.NULL)
      +3348            this = self.expression(
      +3349                exp.GeneratedAsIdentityColumnConstraint, this=False, on_null=on_null
      +3350            )
      +3351        else:
      +3352            self._match_text_seq("ALWAYS")
      +3353            this = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=True)
      +3354
      +3355        self._match(TokenType.ALIAS)
      +3356        identity = self._match_text_seq("IDENTITY")
      +3357
      +3358        if self._match(TokenType.L_PAREN):
      +3359            if self._match_text_seq("START", "WITH"):
      +3360                this.set("start", self._parse_bitwise())
      +3361            if self._match_text_seq("INCREMENT", "BY"):
      +3362                this.set("increment", self._parse_bitwise())
      +3363            if self._match_text_seq("MINVALUE"):
      +3364                this.set("minvalue", self._parse_bitwise())
      +3365            if self._match_text_seq("MAXVALUE"):
      +3366                this.set("maxvalue", self._parse_bitwise())
      +3367
      +3368            if self._match_text_seq("CYCLE"):
      +3369                this.set("cycle", True)
      +3370            elif self._match_text_seq("NO", "CYCLE"):
      +3371                this.set("cycle", False)
      +3372
      +3373            if not identity:
      +3374                this.set("expression", self._parse_bitwise())
      +3375
      +3376            self._match_r_paren()
      +3377
      +3378        return this
      +3379
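# A usage sketch for the identity options handled above; the Postgres DDL is an
# assumption chosen to exercise BY DEFAULT, START WITH, INCREMENT BY and NO CYCLE.
import sqlglot

ddl = (
    "CREATE TABLE t ("
    "  id INT GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 5 NO CYCLE)"
    ")"
)
expr = sqlglot.parse_one(ddl, read="postgres")
# The parenthesized options land on the constraint via this.set("start", ...),
# this.set("increment", ...) and this.set("cycle", False).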
      +3380    def _parse_inline(self) -> exp.InlineLengthColumnConstraint:
      +3381        self._match_text_seq("LENGTH")
      +3382        return self.expression(exp.InlineLengthColumnConstraint, this=self._parse_bitwise())
      +3383
      +3384    def _parse_not_constraint(
      +3385        self,
      +3386    ) -> t.Optional[exp.NotNullColumnConstraint | exp.CaseSpecificColumnConstraint]:
      +3387        if self._match_text_seq("NULL"):
      +3388            return self.expression(exp.NotNullColumnConstraint)
      +3389        if self._match_text_seq("CASESPECIFIC"):
      +3390            return self.expression(exp.CaseSpecificColumnConstraint, not_=True)
      +3391        return None
      +3392
      +3393    def _parse_column_constraint(self) -> t.Optional[exp.Expression]:
      +3394        if self._match(TokenType.CONSTRAINT):
      +3395            this = self._parse_id_var()
      +3396        else:
      +3397            this = None
      +3398
      +3399        if self._match_texts(self.CONSTRAINT_PARSERS):
      +3400            return self.expression(
      +3401                exp.ColumnConstraint,
      +3402                this=this,
      +3403                kind=self.CONSTRAINT_PARSERS[self._prev.text.upper()](self),
      +3404            )
      +3405
      +3406        return this
      +3407
      +3408    def _parse_constraint(self) -> t.Optional[exp.Expression]:
      +3409        if not self._match(TokenType.CONSTRAINT):
      +3410            return self._parse_unnamed_constraint(constraints=self.SCHEMA_UNNAMED_CONSTRAINTS)
      +3411
      +3412        this = self._parse_id_var()
      +3413        expressions = []
      +3414
      +3415        while True:
      +3416            constraint = self._parse_unnamed_constraint() or self._parse_function()
      +3417            if not constraint:
      +3418                break
      +3419            expressions.append(constraint)
      +3420
      +3421        return self.expression(exp.Constraint, this=this, expressions=expressions)
      +3422
      +3423    def _parse_unnamed_constraint(
      +3424        self, constraints: t.Optional[t.Collection[str]] = None
      +3425    ) -> t.Optional[exp.Expression]:
      +3426        if not self._match_texts(constraints or self.CONSTRAINT_PARSERS):
      +3427            return None
      +3428
      +3429        constraint = self._prev.text.upper()
      +3430        if constraint not in self.CONSTRAINT_PARSERS:
      +3431            self.raise_error(f"No parser found for schema constraint {constraint}.")
      +3432
      +3433        return self.CONSTRAINT_PARSERS[constraint](self)
      +3434
      +3435    def _parse_unique(self) -> exp.UniqueColumnConstraint:
      +3436        self._match_text_seq("KEY")
      +3437        return self.expression(
      +3438            exp.UniqueColumnConstraint, this=self._parse_schema(self._parse_id_var(any_token=False))
      +3439        )
      +3440
      +3441    def _parse_key_constraint_options(self) -> t.List[str]:
      +3442        options = []
      +3443        while True:
      +3444            if not self._curr:
      +3445                break
      +3446
      +3447            if self._match(TokenType.ON):
      +3448                action = None
      +3449                on = self._advance_any() and self._prev.text
      +3450
      +3451                if self._match_text_seq("NO", "ACTION"):
      +3452                    action = "NO ACTION"
      +3453                elif self._match_text_seq("CASCADE"):
      +3454                    action = "CASCADE"
      +3455                elif self._match_pair(TokenType.SET, TokenType.NULL):
      +3456                    action = "SET NULL"
      +3457                elif self._match_pair(TokenType.SET, TokenType.DEFAULT):
      +3458                    action = "SET DEFAULT"
      +3459                else:
      +3460                    self.raise_error("Invalid key constraint")
      +3461
      +3462                options.append(f"ON {on} {action}")
      +3463            elif self._match_text_seq("NOT", "ENFORCED"):
      +3464                options.append("NOT ENFORCED")
      +3465            elif self._match_text_seq("DEFERRABLE"):
      +3466                options.append("DEFERRABLE")
      +3467            elif self._match_text_seq("INITIALLY", "DEFERRED"):
      +3468                options.append("INITIALLY DEFERRED")
      +3469            elif self._match_text_seq("NORELY"):
      +3470                options.append("NORELY")
      +3471            elif self._match_text_seq("MATCH", "FULL"):
      +3472                options.append("MATCH FULL")
      +3473            else:
      +3474                break
      +3475
      +3476        return options
      +3477
      +3478    def _parse_references(self, match: bool = True) -> t.Optional[exp.Reference]:
      +3479        if match and not self._match(TokenType.REFERENCES):
      +3480            return None
      +3481
      +3482        expressions = None
      +3483        this = self._parse_id_var()
      +3484
      +3485        if self._match(TokenType.L_PAREN, advance=False):
      +3486            expressions = self._parse_wrapped_id_vars()
      +3487
      +3488        options = self._parse_key_constraint_options()
      +3489        return self.expression(exp.Reference, this=this, expressions=expressions, options=options)
      +3490
      +3491    def _parse_foreign_key(self) -> exp.ForeignKey:
      +3492        expressions = self._parse_wrapped_id_vars()
      +3493        reference = self._parse_references()
      +3494        options = {}
      +3495
      +3496        while self._match(TokenType.ON):
      +3497            if not self._match_set((TokenType.DELETE, TokenType.UPDATE)):
      +3498                self.raise_error("Expected DELETE or UPDATE")
      +3499
      +3500            kind = self._prev.text.lower()
      +3501
      +3502            if self._match_text_seq("NO", "ACTION"):
      +3503                action = "NO ACTION"
      +3504            elif self._match(TokenType.SET):
      +3505                self._match_set((TokenType.NULL, TokenType.DEFAULT))
      +3506                action = "SET " + self._prev.text.upper()
      +3507            else:
      +3508                self._advance()
      +3509                action = self._prev.text.upper()
      +3510
      +3511            options[kind] = action
      +3512
      +3513        return self.expression(
      +3514            exp.ForeignKey, expressions=expressions, reference=reference, **options  # type: ignore
      +3515        )
      +3516
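# A sketch of the FOREIGN KEY / REFERENCES path above; the dialect-neutral DDL is illustrative.
import sqlglot
from sqlglot import exp

ddl = (
    "CREATE TABLE child ("
    "  parent_id INT,"
    "  FOREIGN KEY (parent_id) REFERENCES parent (id) ON DELETE CASCADE ON UPDATE SET NULL"
    ")"
)
fk = sqlglot.parse_one(ddl).find(exp.ForeignKey)
# The ON DELETE / ON UPDATE clauses are collected into options[kind] and surface as the
# `delete` and `update` args of the ForeignKey expression.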
      +3517    def _parse_primary_key(
      +3518        self, wrapped_optional: bool = False, in_props: bool = False
      +3519    ) -> exp.PrimaryKeyColumnConstraint | exp.PrimaryKey:
      +3520        desc = (
      +3521            self._match_set((TokenType.ASC, TokenType.DESC))
      +3522            and self._prev.token_type == TokenType.DESC
      +3523        )
      +3524
      +3525        if not in_props and not self._match(TokenType.L_PAREN, advance=False):
      +3526            return self.expression(exp.PrimaryKeyColumnConstraint, desc=desc)
      +3527
      +3528        expressions = self._parse_wrapped_csv(self._parse_field, optional=wrapped_optional)
      +3529        options = self._parse_key_constraint_options()
      +3530        return self.expression(exp.PrimaryKey, expressions=expressions, options=options)
      +3531
      +3532    def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      +3533        if not self._match_set((TokenType.L_BRACKET, TokenType.L_BRACE)):
      +3534            return this
      +3535
      +3536        bracket_kind = self._prev.token_type
      +3537
      +3538        if self._match(TokenType.COLON):
      +3539            expressions: t.List[t.Optional[exp.Expression]] = [
      +3540                self.expression(exp.Slice, expression=self._parse_conjunction())
      +3541            ]
      +3542        else:
      +3543            expressions = self._parse_csv(lambda: self._parse_slice(self._parse_conjunction()))
      +3544
      +3545        # https://duckdb.org/docs/sql/data_types/struct.html#creating-structs
      +3546        if bracket_kind == TokenType.L_BRACE:
      +3547            this = self.expression(exp.Struct, expressions=expressions)
      +3548        elif not this or this.name.upper() == "ARRAY":
      +3549            this = self.expression(exp.Array, expressions=expressions)
      +3550        else:
      +3551            expressions = apply_index_offset(this, expressions, -self.INDEX_OFFSET)
      +3552            this = self.expression(exp.Bracket, this=this, expressions=expressions)
      +3553
      +3554        if not self._match(TokenType.R_BRACKET) and bracket_kind == TokenType.L_BRACKET:
      +3555            self.raise_error("Expected ]")
      +3556        elif not self._match(TokenType.R_BRACE) and bracket_kind == TokenType.L_BRACE:
      +3557            self.raise_error("Expected }")
      +3558
      +3559        self._add_comments(this)
      +3560        return self._parse_bracket(this)
      +3561
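# A sketch of the bracket handling above: DuckDB's curly-brace literal takes the L_BRACE
# branch and should become an exp.Struct, while an ordinary subscript becomes exp.Bracket.
import sqlglot
from sqlglot import exp

struct = sqlglot.parse_one("SELECT {'a': 1, 'b': 2}", read="duckdb").find(exp.Struct)
subscript = sqlglot.parse_one("SELECT arr[1]", read="duckdb").find(exp.Bracket)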
      +3562    def _parse_slice(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
      +3563        if self._match(TokenType.COLON):
      +3564            return self.expression(exp.Slice, this=this, expression=self._parse_conjunction())
      +3565        return this
      +3566
      +3567    def _parse_case(self) -> t.Optional[exp.Expression]:
      +3568        ifs = []
      +3569        default = None
      +3570
      +3571        expression = self._parse_conjunction()
       3572
      -3573    def _parse_case(self) -> t.Optional[exp.Expression]:
      -3574        ifs = []
      -3575        default = None
      -3576
      -3577        expression = self._parse_conjunction()
      +3573        while self._match(TokenType.WHEN):
      +3574            this = self._parse_conjunction()
      +3575            self._match(TokenType.THEN)
      +3576            then = self._parse_conjunction()
      +3577            ifs.append(self.expression(exp.If, this=this, true=then))
       3578
      -3579        while self._match(TokenType.WHEN):
      -3580            this = self._parse_conjunction()
      -3581            self._match(TokenType.THEN)
      -3582            then = self._parse_conjunction()
      -3583            ifs.append(self.expression(exp.If, this=this, true=then))
      +3579        if self._match(TokenType.ELSE):
      +3580            default = self._parse_conjunction()
      +3581
      +3582        if not self._match(TokenType.END):
      +3583            self.raise_error("Expected END after CASE", self._prev)
       3584
      -3585        if self._match(TokenType.ELSE):
      -3586            default = self._parse_conjunction()
      -3587
      -3588        if not self._match(TokenType.END):
      -3589            self.raise_error("Expected END after CASE", self._prev)
      -3590
      -3591        return self._parse_window(
      -3592            self.expression(exp.Case, this=expression, ifs=ifs, default=default)
      -3593        )
      -3594
      -3595    def _parse_if(self) -> t.Optional[exp.Expression]:
      -3596        if self._match(TokenType.L_PAREN):
      -3597            args = self._parse_csv(self._parse_conjunction)
      -3598            this = exp.If.from_arg_list(args)
      -3599            self.validate_expression(this, args)
      -3600            self._match_r_paren()
      -3601        else:
      -3602            index = self._index - 1
      -3603            condition = self._parse_conjunction()
      -3604
      -3605            if not condition:
      -3606                self._retreat(index)
      -3607                return None
      -3608
      -3609            self._match(TokenType.THEN)
      -3610            true = self._parse_conjunction()
      -3611            false = self._parse_conjunction() if self._match(TokenType.ELSE) else None
      -3612            self._match(TokenType.END)
      -3613            this = self.expression(exp.If, this=condition, true=true, false=false)
      -3614
      -3615        return self._parse_window(this)
      -3616
      -3617    def _parse_extract(self) -> exp.Expression:
      -3618        this = self._parse_function() or self._parse_var() or self._parse_type()
      -3619
      -3620        if self._match(TokenType.FROM):
      -3621            return self.expression(exp.Extract, this=this, expression=self._parse_bitwise())
      -3622
      -3623        if not self._match(TokenType.COMMA):
      -3624            self.raise_error("Expected FROM or comma after EXTRACT", self._prev)
      -3625
      -3626        return self.expression(exp.Extract, this=this, expression=self._parse_bitwise())
      -3627
      -3628    def _parse_cast(self, strict: bool) -> exp.Expression:
      -3629        this = self._parse_conjunction()
      -3630
      -3631        if not self._match(TokenType.ALIAS):
      -3632            if self._match(TokenType.COMMA):
      -3633                return self.expression(
      -3634                    exp.CastToStrType, this=this, expression=self._parse_string()
      -3635                )
      -3636            else:
      -3637                self.raise_error("Expected AS after CAST")
      -3638
      -3639        to = self._parse_types()
      -3640
      -3641        if not to:
      -3642            self.raise_error("Expected TYPE after CAST")
      -3643        elif to.this == exp.DataType.Type.CHAR:
      -3644            if self._match(TokenType.CHARACTER_SET):
      -3645                to = self.expression(exp.CharacterSet, this=self._parse_var_or_string())
      -3646
      -3647        return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
      -3648
      -3649    def _parse_string_agg(self) -> exp.Expression:
      -3650        expression: t.Optional[exp.Expression]
      -3651
      -3652        if self._match(TokenType.DISTINCT):
      -3653            args = self._parse_csv(self._parse_conjunction)
      -3654            expression = self.expression(exp.Distinct, expressions=[seq_get(args, 0)])
      -3655        else:
      -3656            args = self._parse_csv(self._parse_conjunction)
      -3657            expression = seq_get(args, 0)
      -3658
      -3659        index = self._index
      -3660        if not self._match(TokenType.R_PAREN):
      -3661            # postgres: STRING_AGG([DISTINCT] expression, separator [ORDER BY expression1 {ASC | DESC} [, ...]])
      -3662            order = self._parse_order(this=expression)
      -3663            return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1))
      -3664
      -3665        # Checks if we can parse an order clause: WITHIN GROUP (ORDER BY <order_by_expression_list> [ASC | DESC]).
      -3666        # This is done "manually", instead of letting _parse_window parse it into an exp.WithinGroup node, so that
      -3667        # the STRING_AGG call is parsed like in MySQL / SQLite and can thus be transpiled more easily to them.
      -3668        if not self._match_text_seq("WITHIN", "GROUP"):
      -3669            self._retreat(index)
      -3670            this = exp.GroupConcat.from_arg_list(args)
      -3671            self.validate_expression(this, args)
      -3672            return this
      -3673
      -3674        self._match_l_paren()  # The corresponding match_r_paren will be called in parse_function (caller)
      -3675        order = self._parse_order(this=expression)
      -3676        return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1))
      -3677
      -3678    def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]:
      -3679        to: t.Optional[exp.Expression]
      -3680        this = self._parse_bitwise()
      -3681
      -3682        if self._match(TokenType.USING):
      -3683            to = self.expression(exp.CharacterSet, this=self._parse_var())
      -3684        elif self._match(TokenType.COMMA):
      -3685            to = self._parse_bitwise()
      -3686        else:
      -3687            to = None
      -3688
      -3689        # Swap the argument order if needed to produce the correct AST
      -3690        if self.CONVERT_TYPE_FIRST:
      -3691            this, to = to, this
      +3585        return self._parse_window(
      +3586            self.expression(exp.Case, this=expression, ifs=ifs, default=default)
      +3587        )
      +3588
      +3589    def _parse_if(self) -> t.Optional[exp.Expression]:
      +3590        if self._match(TokenType.L_PAREN):
      +3591            args = self._parse_csv(self._parse_conjunction)
      +3592            this = self.validate_expression(exp.If.from_arg_list(args), args)
      +3593            self._match_r_paren()
      +3594        else:
      +3595            index = self._index - 1
      +3596            condition = self._parse_conjunction()
      +3597
      +3598            if not condition:
      +3599                self._retreat(index)
      +3600                return None
      +3601
      +3602            self._match(TokenType.THEN)
      +3603            true = self._parse_conjunction()
      +3604            false = self._parse_conjunction() if self._match(TokenType.ELSE) else None
      +3605            self._match(TokenType.END)
      +3606            this = self.expression(exp.If, this=condition, true=true, false=false)
      +3607
      +3608        return self._parse_window(this)
      +3609
      +3610    def _parse_extract(self) -> exp.Extract:
      +3611        this = self._parse_function() or self._parse_var() or self._parse_type()
      +3612
      +3613        if self._match(TokenType.FROM):
      +3614            return self.expression(exp.Extract, this=this, expression=self._parse_bitwise())
      +3615
      +3616        if not self._match(TokenType.COMMA):
      +3617            self.raise_error("Expected FROM or comma after EXTRACT", self._prev)
      +3618
      +3619        return self.expression(exp.Extract, this=this, expression=self._parse_bitwise())
      +3620
      +3621    def _parse_cast(self, strict: bool) -> exp.Expression:
      +3622        this = self._parse_conjunction()
      +3623
      +3624        if not self._match(TokenType.ALIAS):
      +3625            if self._match(TokenType.COMMA):
      +3626                return self.expression(
      +3627                    exp.CastToStrType, this=this, expression=self._parse_string()
      +3628                )
      +3629            else:
      +3630                self.raise_error("Expected AS after CAST")
      +3631
      +3632        to = self._parse_types()
      +3633
      +3634        if not to:
      +3635            self.raise_error("Expected TYPE after CAST")
      +3636        elif to.this == exp.DataType.Type.CHAR:
      +3637            if self._match(TokenType.CHARACTER_SET):
      +3638                to = self.expression(exp.CharacterSet, this=self._parse_var_or_string())
      +3639        elif to.this in exp.DataType.TEMPORAL_TYPES and self._match(TokenType.FORMAT):
      +3640            fmt = self._parse_string()
      +3641
      +3642            return self.expression(
      +3643                exp.StrToDate if to.this == exp.DataType.Type.DATE else exp.StrToTime,
      +3644                this=this,
      +3645                format=exp.Literal.string(
      +3646                    format_time(
      +3647                        fmt.this if fmt else "",
      +3648                        self.FORMAT_MAPPING or self.TIME_MAPPING,
      +3649                        self.FORMAT_TRIE or self.TIME_TRIE,
      +3650                    )
      +3651                ),
      +3652            )
      +3653
      +3654        return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
      +3655
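# A sketch of the new CAST ... FORMAT branch above, assuming a dialect such as Teradata
# supplies FORMAT_MAPPING; the query and target dialect are illustrative.
import sqlglot

sql = "SELECT CAST(x AS DATE FORMAT 'YYYY-MM-DD') FROM t"
print(sqlglot.transpile(sql, read="teradata", write="duckdb")[0])
# The temporal cast with an explicit format string should be rewritten into a
# StrToDate-style expression instead of a plain Cast, with the format string normalized
# through FORMAT_MAPPING / TIME_MAPPING.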
      +3656    def _parse_concat(self) -> t.Optional[exp.Expression]:
      +3657        args = self._parse_csv(self._parse_conjunction)
      +3658        if self.CONCAT_NULL_OUTPUTS_STRING:
      +3659            args = [exp.func("COALESCE", arg, exp.Literal.string("")) for arg in args]
      +3660
      +3661        # Some dialects (e.g. Trino) don't allow a single-argument CONCAT call, so when
      +3662        # we find such a call we replace it with its argument.
      +3663        if len(args) == 1:
      +3664            return args[0]
      +3665
      +3666        return self.expression(
      +3667            exp.Concat if self.STRICT_STRING_CONCAT else exp.SafeConcat, expressions=args
      +3668        )
      +3669
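# A sketch of the single-argument CONCAT collapse described in the comment above;
# with the default parser settings the call should be replaced by its only argument.
import sqlglot

print(sqlglot.parse_one("SELECT CONCAT(a) FROM t").sql())
# Dialects that set CONCAT_NULL_OUTPUTS_STRING would additionally wrap each argument
# in COALESCE(arg, '').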
      +3670    def _parse_string_agg(self) -> exp.Expression:
      +3671        expression: t.Optional[exp.Expression]
      +3672
      +3673        if self._match(TokenType.DISTINCT):
      +3674            args = self._parse_csv(self._parse_conjunction)
      +3675            expression = self.expression(exp.Distinct, expressions=[seq_get(args, 0)])
      +3676        else:
      +3677            args = self._parse_csv(self._parse_conjunction)
      +3678            expression = seq_get(args, 0)
      +3679
      +3680        index = self._index
      +3681        if not self._match(TokenType.R_PAREN):
      +3682            # postgres: STRING_AGG([DISTINCT] expression, separator [ORDER BY expression1 {ASC | DESC} [, ...]])
      +3683            order = self._parse_order(this=expression)
      +3684            return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1))
      +3685
      +3686        # Checks if we can parse an order clause: WITHIN GROUP (ORDER BY <order_by_expression_list> [ASC | DESC]).
      +3687        # This is done "manually", instead of letting _parse_window parse it into an exp.WithinGroup node, so that
      +3688        # the STRING_AGG call is parsed like in MySQL / SQLite and can thus be transpiled more easily to them.
      +3689        if not self._match_text_seq("WITHIN", "GROUP"):
      +3690            self._retreat(index)
      +3691            return self.validate_expression(exp.GroupConcat.from_arg_list(args), args)
       3692
      -3693        return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
      -3694
      -3695    def _parse_decode(self) -> t.Optional[exp.Expression]:
      -3696        """
      -3697        There are generally two variants of the DECODE function:
      -3698
      -3699        - DECODE(bin, charset)
      -3700        - DECODE(expression, search, result [, search, result] ... [, default])
      -3701
      -3702        The second variant will always be parsed into a CASE expression. Note that NULL
      -3703        needs special treatment, since we need to explicitly check for it with `IS NULL`,
      -3704        instead of relying on pattern matching.
      -3705        """
      -3706        args = self._parse_csv(self._parse_conjunction)
      +3693        self._match_l_paren()  # The corresponding match_r_paren will be called in parse_function (caller)
      +3694        order = self._parse_order(this=expression)
      +3695        return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1))
      +3696
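# A sketch of the STRING_AGG handling above: the Postgres form with an inline ORDER BY
# is parsed into GroupConcat so it can be transpiled to MySQL / SQLite; the target
# dialect below is illustrative.
import sqlglot

pg = "SELECT STRING_AGG(x, ',' ORDER BY y) FROM t"
print(sqlglot.transpile(pg, read="postgres", write="mysql")[0])
# Expect a GROUP_CONCAT(x ORDER BY y SEPARATOR ',') style rendering.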
      +3697    def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]:
      +3698        to: t.Optional[exp.Expression]
      +3699        this = self._parse_bitwise()
      +3700
      +3701        if self._match(TokenType.USING):
      +3702            to = self.expression(exp.CharacterSet, this=self._parse_var())
      +3703        elif self._match(TokenType.COMMA):
      +3704            to = self._parse_bitwise()
      +3705        else:
      +3706            to = None
       3707
      -3708        if len(args) < 3:
      -3709            return self.expression(exp.Decode, this=seq_get(args, 0), charset=seq_get(args, 1))
      -3710
      -3711        expression, *expressions = args
      -3712        if not expression:
      -3713            return None
      -3714
      -3715        ifs = []
      -3716        for search, result in zip(expressions[::2], expressions[1::2]):
      -3717            if not search or not result:
      -3718                return None
      -3719
      -3720            if isinstance(search, exp.Literal):
      -3721                ifs.append(
      -3722                    exp.If(this=exp.EQ(this=expression.copy(), expression=search), true=result)
      -3723                )
      -3724            elif isinstance(search, exp.Null):
      -3725                ifs.append(
      -3726                    exp.If(this=exp.Is(this=expression.copy(), expression=exp.Null()), true=result)
      -3727                )
      -3728            else:
      -3729                cond = exp.or_(
      -3730                    exp.EQ(this=expression.copy(), expression=search),
      -3731                    exp.and_(
      -3732                        exp.Is(this=expression.copy(), expression=exp.Null()),
      -3733                        exp.Is(this=search.copy(), expression=exp.Null()),
      -3734                        copy=False,
      -3735                    ),
      -3736                    copy=False,
      -3737                )
      -3738                ifs.append(exp.If(this=cond, true=result))
      -3739
      -3740        return exp.Case(ifs=ifs, default=expressions[-1] if len(expressions) % 2 == 1 else None)
      -3741
      -3742    def _parse_json_key_value(self) -> t.Optional[exp.Expression]:
      -3743        self._match_text_seq("KEY")
      -3744        key = self._parse_field()
      -3745        self._match(TokenType.COLON)
      -3746        self._match_text_seq("VALUE")
      -3747        value = self._parse_field()
      -3748        if not key and not value:
      -3749            return None
      -3750        return self.expression(exp.JSONKeyValue, this=key, expression=value)
      -3751
      -3752    def _parse_json_object(self) -> exp.Expression:
      -3753        expressions = self._parse_csv(self._parse_json_key_value)
      -3754
      -3755        null_handling = None
      -3756        if self._match_text_seq("NULL", "ON", "NULL"):
      -3757            null_handling = "NULL ON NULL"
      -3758        elif self._match_text_seq("ABSENT", "ON", "NULL"):
      -3759            null_handling = "ABSENT ON NULL"
      +3708        # Swap the argument order if needed to produce the correct AST
      +3709        if self.CONVERT_TYPE_FIRST:
      +3710            this, to = to, this
      +3711
      +3712        return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
      +3713
      +3714    def _parse_decode(self) -> t.Optional[exp.Decode | exp.Case]:
      +3715        """
      +3716        There are generally two variants of the DECODE function:
      +3717
      +3718        - DECODE(bin, charset)
      +3719        - DECODE(expression, search, result [, search, result] ... [, default])
      +3720
      +3721        The second variant will always be parsed into a CASE expression. Note that NULL
      +3722        needs special treatment, since we need to explicitly check for it with `IS NULL`,
      +3723        instead of relying on pattern matching.
      +3724        """
      +3725        args = self._parse_csv(self._parse_conjunction)
      +3726
      +3727        if len(args) < 3:
      +3728            return self.expression(exp.Decode, this=seq_get(args, 0), charset=seq_get(args, 1))
      +3729
      +3730        expression, *expressions = args
      +3731        if not expression:
      +3732            return None
      +3733
      +3734        ifs = []
      +3735        for search, result in zip(expressions[::2], expressions[1::2]):
      +3736            if not search or not result:
      +3737                return None
      +3738
      +3739            if isinstance(search, exp.Literal):
      +3740                ifs.append(
      +3741                    exp.If(this=exp.EQ(this=expression.copy(), expression=search), true=result)
      +3742                )
      +3743            elif isinstance(search, exp.Null):
      +3744                ifs.append(
      +3745                    exp.If(this=exp.Is(this=expression.copy(), expression=exp.Null()), true=result)
      +3746                )
      +3747            else:
      +3748                cond = exp.or_(
      +3749                    exp.EQ(this=expression.copy(), expression=search),
      +3750                    exp.and_(
      +3751                        exp.Is(this=expression.copy(), expression=exp.Null()),
      +3752                        exp.Is(this=search.copy(), expression=exp.Null()),
      +3753                        copy=False,
      +3754                    ),
      +3755                    copy=False,
      +3756                )
      +3757                ifs.append(exp.If(this=cond, true=result))
      +3758
      +3759        return exp.Case(ifs=ifs, default=expressions[-1] if len(expressions) % 2 == 1 else None)
       3760
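# A sketch of the DECODE rewrite described in the docstring above: the 3+ argument
# Oracle form becomes a searched CASE, with NULL searches turned into IS NULL checks.
import sqlglot

oracle = "SELECT DECODE(x, 1, 'one', NULL, 'missing', 'other') FROM t"
print(sqlglot.transpile(oracle, read="oracle", write="duckdb")[0])
# Expect roughly: CASE WHEN x = 1 THEN 'one' WHEN x IS NULL THEN 'missing' ELSE 'other' END.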
      -3761        unique_keys = None
      -3762        if self._match_text_seq("WITH", "UNIQUE"):
      -3763            unique_keys = True
      -3764        elif self._match_text_seq("WITHOUT", "UNIQUE"):
      -3765            unique_keys = False
      -3766
      -3767        self._match_text_seq("KEYS")
      -3768
      -3769        return_type = self._match_text_seq("RETURNING") and self._parse_type()
      -3770        format_json = self._match_text_seq("FORMAT", "JSON")
      -3771        encoding = self._match_text_seq("ENCODING") and self._parse_var()
      -3772
      -3773        return self.expression(
      -3774            exp.JSONObject,
      -3775            expressions=expressions,
      -3776            null_handling=null_handling,
      -3777            unique_keys=unique_keys,
      -3778            return_type=return_type,
      -3779            format_json=format_json,
      -3780            encoding=encoding,
      -3781        )
      -3782
      -3783    def _parse_logarithm(self) -> exp.Expression:
      -3784        # Default argument order is base, expression
      -3785        args = self._parse_csv(self._parse_range)
      -3786
      -3787        if len(args) > 1:
      -3788            if not self.LOG_BASE_FIRST:
      -3789                args.reverse()
      -3790            return exp.Log.from_arg_list(args)
      -3791
      -3792        return self.expression(
      -3793            exp.Ln if self.LOG_DEFAULTS_TO_LN else exp.Log, this=seq_get(args, 0)
      -3794        )
      -3795
      -3796    def _parse_match_against(self) -> exp.Expression:
      -3797        expressions = self._parse_csv(self._parse_column)
      -3798
      -3799        self._match_text_seq(")", "AGAINST", "(")
      -3800
      -3801        this = self._parse_string()
      -3802
      -3803        if self._match_text_seq("IN", "NATURAL", "LANGUAGE", "MODE"):
      -3804            modifier = "IN NATURAL LANGUAGE MODE"
      -3805            if self._match_text_seq("WITH", "QUERY", "EXPANSION"):
      -3806                modifier = f"{modifier} WITH QUERY EXPANSION"
      -3807        elif self._match_text_seq("IN", "BOOLEAN", "MODE"):
      -3808            modifier = "IN BOOLEAN MODE"
      -3809        elif self._match_text_seq("WITH", "QUERY", "EXPANSION"):
      -3810            modifier = "WITH QUERY EXPANSION"
      -3811        else:
      -3812            modifier = None
      -3813
      -3814        return self.expression(
      -3815            exp.MatchAgainst, this=this, expressions=expressions, modifier=modifier
      -3816        )
      -3817
      -3818    # https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16
      -3819    def _parse_open_json(self) -> exp.Expression:
      -3820        this = self._parse_bitwise()
      -3821        path = self._match(TokenType.COMMA) and self._parse_string()
      -3822
      -3823        def _parse_open_json_column_def() -> exp.Expression:
      -3824            this = self._parse_field(any_token=True)
      -3825            kind = self._parse_types()
      -3826            path = self._parse_string()
      -3827            as_json = self._match_pair(TokenType.ALIAS, TokenType.JSON)
      -3828            return self.expression(
      -3829                exp.OpenJSONColumnDef, this=this, kind=kind, path=path, as_json=as_json
      -3830            )
      -3831
      -3832        expressions = None
      -3833        if self._match_pair(TokenType.R_PAREN, TokenType.WITH):
      -3834            self._match_l_paren()
      -3835            expressions = self._parse_csv(_parse_open_json_column_def)
      -3836
      -3837        return self.expression(exp.OpenJSON, this=this, path=path, expressions=expressions)
      +3761    def _parse_json_key_value(self) -> t.Optional[exp.JSONKeyValue]:
      +3762        self._match_text_seq("KEY")
      +3763        key = self._parse_field()
      +3764        self._match(TokenType.COLON)
      +3765        self._match_text_seq("VALUE")
      +3766        value = self._parse_field()
      +3767
      +3768        if not key and not value:
      +3769            return None
      +3770        return self.expression(exp.JSONKeyValue, this=key, expression=value)
      +3771
      +3772    def _parse_json_object(self) -> exp.JSONObject:
      +3773        star = self._parse_star()
      +3774        expressions = [star] if star else self._parse_csv(self._parse_json_key_value)
      +3775
      +3776        null_handling = None
      +3777        if self._match_text_seq("NULL", "ON", "NULL"):
      +3778            null_handling = "NULL ON NULL"
      +3779        elif self._match_text_seq("ABSENT", "ON", "NULL"):
      +3780            null_handling = "ABSENT ON NULL"
      +3781
      +3782        unique_keys = None
      +3783        if self._match_text_seq("WITH", "UNIQUE"):
      +3784            unique_keys = True
      +3785        elif self._match_text_seq("WITHOUT", "UNIQUE"):
      +3786            unique_keys = False
      +3787
      +3788        self._match_text_seq("KEYS")
      +3789
      +3790        return_type = self._match_text_seq("RETURNING") and self._parse_type()
      +3791        format_json = self._match_text_seq("FORMAT", "JSON")
      +3792        encoding = self._match_text_seq("ENCODING") and self._parse_var()
      +3793
      +3794        return self.expression(
      +3795            exp.JSONObject,
      +3796            expressions=expressions,
      +3797            null_handling=null_handling,
      +3798            unique_keys=unique_keys,
      +3799            return_type=return_type,
      +3800            format_json=format_json,
      +3801            encoding=encoding,
      +3802        )
      +3803
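# A sketch of the JSON_OBJECT handling above, including the newly supported star form;
# the exact SQL flavor and clauses below are illustrative assumptions.
import sqlglot

pairs = sqlglot.parse_one("SELECT JSON_OBJECT('a': 1, 'b': 2 ABSENT ON NULL)")
star = sqlglot.parse_one("SELECT JSON_OBJECT(*)")
# Key/value pairs, the NULL-handling clause and the unique-keys options all end up as
# args on the exp.JSONObject node.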
      +3804    def _parse_logarithm(self) -> exp.Func:
      +3805        # Default argument order is base, expression
      +3806        args = self._parse_csv(self._parse_range)
      +3807
      +3808        if len(args) > 1:
      +3809            if not self.LOG_BASE_FIRST:
      +3810                args.reverse()
      +3811            return exp.Log.from_arg_list(args)
      +3812
      +3813        return self.expression(
      +3814            exp.Ln if self.LOG_DEFAULTS_TO_LN else exp.Log, this=seq_get(args, 0)
      +3815        )
      +3816
      +3817    def _parse_match_against(self) -> exp.MatchAgainst:
      +3818        expressions = self._parse_csv(self._parse_column)
      +3819
      +3820        self._match_text_seq(")", "AGAINST", "(")
      +3821
      +3822        this = self._parse_string()
      +3823
      +3824        if self._match_text_seq("IN", "NATURAL", "LANGUAGE", "MODE"):
      +3825            modifier = "IN NATURAL LANGUAGE MODE"
      +3826            if self._match_text_seq("WITH", "QUERY", "EXPANSION"):
      +3827                modifier = f"{modifier} WITH QUERY EXPANSION"
      +3828        elif self._match_text_seq("IN", "BOOLEAN", "MODE"):
      +3829            modifier = "IN BOOLEAN MODE"
      +3830        elif self._match_text_seq("WITH", "QUERY", "EXPANSION"):
      +3831            modifier = "WITH QUERY EXPANSION"
      +3832        else:
      +3833            modifier = None
      +3834
      +3835        return self.expression(
      +3836            exp.MatchAgainst, this=this, expressions=expressions, modifier=modifier
      +3837        )
       3838
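# A sketch of the MySQL full-text predicate handled above; the table and mode are illustrative.
import sqlglot
from sqlglot import exp

sql = "SELECT * FROM t WHERE MATCH(title, body) AGAINST('database' IN NATURAL LANGUAGE MODE)"
ma = sqlglot.parse_one(sql, read="mysql").find(exp.MatchAgainst)
# The columns land in MatchAgainst.expressions, the search string in `this`, and the
# mode text in the `modifier` arg.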
      -3839    def _parse_position(self, haystack_first: bool = False) -> exp.Expression:
      -3840        args = self._parse_csv(self._parse_bitwise)
      -3841
      -3842        if self._match(TokenType.IN):
      -3843            return self.expression(
      -3844                exp.StrPosition, this=self._parse_bitwise(), substr=seq_get(args, 0)
      -3845            )
      -3846
      -3847        if haystack_first:
      -3848            haystack = seq_get(args, 0)
      -3849            needle = seq_get(args, 1)
      -3850        else:
      -3851            needle = seq_get(args, 0)
      -3852            haystack = seq_get(args, 1)
      +3839    # https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16
      +3840    def _parse_open_json(self) -> exp.OpenJSON:
      +3841        this = self._parse_bitwise()
      +3842        path = self._match(TokenType.COMMA) and self._parse_string()
      +3843
      +3844        def _parse_open_json_column_def() -> exp.OpenJSONColumnDef:
      +3845            this = self._parse_field(any_token=True)
      +3846            kind = self._parse_types()
      +3847            path = self._parse_string()
      +3848            as_json = self._match_pair(TokenType.ALIAS, TokenType.JSON)
      +3849
      +3850            return self.expression(
      +3851                exp.OpenJSONColumnDef, this=this, kind=kind, path=path, as_json=as_json
      +3852            )
       3853
      -3854        this = exp.StrPosition(this=haystack, substr=needle, position=seq_get(args, 2))
      -3855
      -3856        self.validate_expression(this, args)
      -3857
      -3858        return this
      -3859
      -3860    def _parse_join_hint(self, func_name: str) -> exp.Expression:
      -3861        args = self._parse_csv(self._parse_table)
      -3862        return exp.JoinHint(this=func_name.upper(), expressions=args)
      +3854        expressions = None
      +3855        if self._match_pair(TokenType.R_PAREN, TokenType.WITH):
      +3856            self._match_l_paren()
      +3857            expressions = self._parse_csv(_parse_open_json_column_def)
      +3858
      +3859        return self.expression(exp.OpenJSON, this=this, path=path, expressions=expressions)
      +3860
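# A sketch of the T-SQL OPENJSON form referenced by the link above; the variable name
# and column list are illustrative.
import sqlglot

sql = (
    "SELECT * FROM OPENJSON(@doc, '$.items') "
    "WITH (id INT '$.id', name NVARCHAR(MAX) '$.name' AS JSON)"
)
expr = sqlglot.parse_one(sql, read="tsql")
# Each entry in the WITH list should become an OpenJSONColumnDef carrying this, kind,
# path and as_json.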
      +3861    def _parse_position(self, haystack_first: bool = False) -> exp.StrPosition:
      +3862        args = self._parse_csv(self._parse_bitwise)
       3863
      -3864    def _parse_substring(self) -> exp.Expression:
      -3865        # Postgres supports the form: substring(string [from int] [for int])
      -3866        # https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6
      -3867
      -3868        args = self._parse_csv(self._parse_bitwise)
      -3869
      -3870        if self._match(TokenType.FROM):
      -3871            args.append(self._parse_bitwise())
      -3872            if self._match(TokenType.FOR):
      -3873                args.append(self._parse_bitwise())
      -3874
      -3875        this = exp.Substring.from_arg_list(args)
      -3876        self.validate_expression(this, args)
      -3877
      -3878        return this
      +3864        if self._match(TokenType.IN):
      +3865            return self.expression(
      +3866                exp.StrPosition, this=self._parse_bitwise(), substr=seq_get(args, 0)
      +3867            )
      +3868
      +3869        if haystack_first:
      +3870            haystack = seq_get(args, 0)
      +3871            needle = seq_get(args, 1)
      +3872        else:
      +3873            needle = seq_get(args, 0)
      +3874            haystack = seq_get(args, 1)
      +3875
      +3876        return self.expression(
      +3877            exp.StrPosition, this=haystack, substr=needle, position=seq_get(args, 2)
      +3878        )
       3879
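A quick sketch of the two shapes _parse_position normalizes, assuming the default dialect's POSITION function parser:

    import sqlglot
    from sqlglot import exp

    # Both the SQL-standard IN form and the plain argument form end up as
    # exp.StrPosition, with the haystack in `this` and the needle in `substr`.
    node = sqlglot.parse_one("SELECT POSITION('a' IN x)").find(exp.StrPosition)
    print(node.args["this"], node.args["substr"])
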
      -3880    def _parse_trim(self) -> exp.Expression:
      -3881        # https://www.w3resource.com/sql/character-functions/trim.php
      -3882        # https://docs.oracle.com/javadb/10.8.3.0/ref/rreftrimfunc.html
      +3880    def _parse_join_hint(self, func_name: str) -> exp.JoinHint:
      +3881        args = self._parse_csv(self._parse_table)
      +3882        return exp.JoinHint(this=func_name.upper(), expressions=args)
       3883
      -3884        position = None
      -3885        collation = None
      -3886
      -3887        if self._match_texts(self.TRIM_TYPES):
      -3888            position = self._prev.text.upper()
      +3884    def _parse_substring(self) -> exp.Substring:
      +3885        # Postgres supports the form: substring(string [from int] [for int])
      +3886        # https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6
      +3887
      +3888        args = self._parse_csv(self._parse_bitwise)
       3889
      -3890        expression = self._parse_bitwise()
      -3891        if self._match_set((TokenType.FROM, TokenType.COMMA)):
      -3892            this = self._parse_bitwise()
      -3893        else:
      -3894            this = expression
      -3895            expression = None
      +3890        if self._match(TokenType.FROM):
      +3891            args.append(self._parse_bitwise())
      +3892            if self._match(TokenType.FOR):
      +3893                args.append(self._parse_bitwise())
      +3894
      +3895        return self.validate_expression(exp.Substring.from_arg_list(args), args)
       3896
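For reference, the Postgres FROM/FOR form mentioned in the comment maps onto exp.Substring's positional arguments; a minimal sketch:

    import sqlglot
    from sqlglot import exp

    # SUBSTRING(string FROM start FOR length) becomes exp.Substring(this, start, length).
    node = sqlglot.parse_one(
        "SELECT SUBSTRING('hello' FROM 2 FOR 3)", read="postgres"
    ).find(exp.Substring)
    print(node.args["start"], node.args["length"])
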
      -3897        if self._match(TokenType.COLLATE):
      -3898            collation = self._parse_bitwise()
      -3899
      -3900        return self.expression(
      -3901            exp.Trim,
      -3902            this=this,
      -3903            position=position,
      -3904            expression=expression,
      -3905            collation=collation,
      -3906        )
      -3907
      -3908    def _parse_window_clause(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
      -3909        return self._match(TokenType.WINDOW) and self._parse_csv(self._parse_named_window)
      -3910
      -3911    def _parse_named_window(self) -> t.Optional[exp.Expression]:
      -3912        return self._parse_window(self._parse_id_var(), alias=True)
      +3897    def _parse_trim(self) -> exp.Trim:
      +3898        # https://www.w3resource.com/sql/character-functions/trim.php
      +3899        # https://docs.oracle.com/javadb/10.8.3.0/ref/rreftrimfunc.html
      +3900
      +3901        position = None
      +3902        collation = None
      +3903
      +3904        if self._match_texts(self.TRIM_TYPES):
      +3905            position = self._prev.text.upper()
      +3906
      +3907        expression = self._parse_bitwise()
      +3908        if self._match_set((TokenType.FROM, TokenType.COMMA)):
      +3909            this = self._parse_bitwise()
      +3910        else:
      +3911            this = expression
      +3912            expression = None
       3913
      -3914    def _parse_respect_or_ignore_nulls(
      -3915        self, this: t.Optional[exp.Expression]
      -3916    ) -> t.Optional[exp.Expression]:
      -3917        if self._match_text_seq("IGNORE", "NULLS"):
      -3918            return self.expression(exp.IgnoreNulls, this=this)
      -3919        if self._match_text_seq("RESPECT", "NULLS"):
      -3920            return self.expression(exp.RespectNulls, this=this)
      -3921        return this
      -3922
      -3923    def _parse_window(
      -3924        self, this: t.Optional[exp.Expression], alias: bool = False
      -3925    ) -> t.Optional[exp.Expression]:
      -3926        if self._match_pair(TokenType.FILTER, TokenType.L_PAREN):
      -3927            this = self.expression(exp.Filter, this=this, expression=self._parse_where())
      -3928            self._match_r_paren()
      -3929
      -3930        # T-SQL allows the OVER (...) syntax after WITHIN GROUP.
      -3931        # https://learn.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver16
      -3932        if self._match_text_seq("WITHIN", "GROUP"):
      -3933            order = self._parse_wrapped(self._parse_order)
      -3934            this = self.expression(exp.WithinGroup, this=this, expression=order)
      +3914        if self._match(TokenType.COLLATE):
      +3915            collation = self._parse_bitwise()
      +3916
      +3917        return self.expression(
      +3918            exp.Trim, this=this, position=position, expression=expression, collation=collation
      +3919        )
      +3920
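A small sketch of what the ANSI TRIM form produces here (default dialect assumed):

    import sqlglot
    from sqlglot import exp

    # TRIM(LEADING 'x' FROM col) keeps the trim character in `expression`,
    # the operand in `this`, and the trim type in `position`.
    node = sqlglot.parse_one("SELECT TRIM(LEADING 'x' FROM col)").find(exp.Trim)
    print(node.args.get("position"), node.text("expression"))
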
      +3921    def _parse_window_clause(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
      +3922        return self._match(TokenType.WINDOW) and self._parse_csv(self._parse_named_window)
      +3923
      +3924    def _parse_named_window(self) -> t.Optional[exp.Expression]:
      +3925        return self._parse_window(self._parse_id_var(), alias=True)
      +3926
      +3927    def _parse_respect_or_ignore_nulls(
      +3928        self, this: t.Optional[exp.Expression]
      +3929    ) -> t.Optional[exp.Expression]:
      +3930        if self._match_text_seq("IGNORE", "NULLS"):
      +3931            return self.expression(exp.IgnoreNulls, this=this)
      +3932        if self._match_text_seq("RESPECT", "NULLS"):
      +3933            return self.expression(exp.RespectNulls, this=this)
      +3934        return this
       3935
      -3936        # SQL spec defines an optional [ { IGNORE | RESPECT } NULLS ] OVER
      -3937        # Some dialects choose to implement and some do not.
      -3938        # https://dev.mysql.com/doc/refman/8.0/en/window-function-descriptions.html
      -3939
      -3940        # There is some code above in _parse_lambda that handles
      -3941        #   SELECT FIRST_VALUE(TABLE.COLUMN IGNORE|RESPECT NULLS) OVER ...
      +3936    def _parse_window(
      +3937        self, this: t.Optional[exp.Expression], alias: bool = False
      +3938    ) -> t.Optional[exp.Expression]:
      +3939        if self._match_pair(TokenType.FILTER, TokenType.L_PAREN):
      +3940            this = self.expression(exp.Filter, this=this, expression=self._parse_where())
      +3941            self._match_r_paren()
       3942
      -3943        # The below changes handle
      -3944        #   SELECT FIRST_VALUE(TABLE.COLUMN) IGNORE|RESPECT NULLS OVER ...
      -3945
      -3946        # Oracle allows both formats
      -3947        #   (https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/img_text/first_value.html)
      -3948        #   and Snowflake chose to do the same for familiarity
      -3949        #   https://docs.snowflake.com/en/sql-reference/functions/first_value.html#usage-notes
      -3950        this = self._parse_respect_or_ignore_nulls(this)
      -3951
      -3952        # bigquery select from window x AS (partition by ...)
      -3953        if alias:
      -3954            over = None
      -3955            self._match(TokenType.ALIAS)
      -3956        elif not self._match_set(self.WINDOW_BEFORE_PAREN_TOKENS):
      -3957            return this
      -3958        else:
      -3959            over = self._prev.text.upper()
      -3960
      -3961        if not self._match(TokenType.L_PAREN):
      -3962            return self.expression(
      -3963                exp.Window, this=this, alias=self._parse_id_var(False), over=over
      -3964            )
      -3965
      -3966        window_alias = self._parse_id_var(any_token=False, tokens=self.WINDOW_ALIAS_TOKENS)
      -3967
      -3968        first = self._match(TokenType.FIRST)
      -3969        if self._match_text_seq("LAST"):
      -3970            first = False
      -3971
      -3972        partition = self._parse_partition_by()
      -3973        order = self._parse_order()
      -3974        kind = self._match_set((TokenType.ROWS, TokenType.RANGE)) and self._prev.text
      -3975
      -3976        if kind:
      -3977            self._match(TokenType.BETWEEN)
      -3978            start = self._parse_window_spec()
      -3979            self._match(TokenType.AND)
      -3980            end = self._parse_window_spec()
      -3981
      -3982            spec = self.expression(
      -3983                exp.WindowSpec,
      -3984                kind=kind,
      -3985                start=start["value"],
      -3986                start_side=start["side"],
      -3987                end=end["value"],
      -3988                end_side=end["side"],
      -3989            )
      -3990        else:
      -3991            spec = None
      -3992
      -3993        self._match_r_paren()
      +3943        # T-SQL allows the OVER (...) syntax after WITHIN GROUP.
      +3944        # https://learn.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver16
      +3945        if self._match_text_seq("WITHIN", "GROUP"):
      +3946            order = self._parse_wrapped(self._parse_order)
      +3947            this = self.expression(exp.WithinGroup, this=this, expression=order)
      +3948
      +3949        # SQL spec defines an optional [ { IGNORE | RESPECT } NULLS ] OVER
      +3950        # Some dialects choose to implement and some do not.
      +3951        # https://dev.mysql.com/doc/refman/8.0/en/window-function-descriptions.html
      +3952
      +3953        # There is some code above in _parse_lambda that handles
      +3954        #   SELECT FIRST_VALUE(TABLE.COLUMN IGNORE|RESPECT NULLS) OVER ...
      +3955
      +3956        # The below changes handle
      +3957        #   SELECT FIRST_VALUE(TABLE.COLUMN) IGNORE|RESPECT NULLS OVER ...
      +3958
      +3959        # Oracle allows both formats
      +3960        #   (https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/img_text/first_value.html)
      +3961        #   and Snowflake chose to do the same for familiarity
      +3962        #   https://docs.snowflake.com/en/sql-reference/functions/first_value.html#usage-notes
      +3963        this = self._parse_respect_or_ignore_nulls(this)
      +3964
      +3965        # bigquery select from window x AS (partition by ...)
      +3966        if alias:
      +3967            over = None
      +3968            self._match(TokenType.ALIAS)
      +3969        elif not self._match_set(self.WINDOW_BEFORE_PAREN_TOKENS):
      +3970            return this
      +3971        else:
      +3972            over = self._prev.text.upper()
      +3973
      +3974        if not self._match(TokenType.L_PAREN):
      +3975            return self.expression(
      +3976                exp.Window, this=this, alias=self._parse_id_var(False), over=over
      +3977            )
      +3978
      +3979        window_alias = self._parse_id_var(any_token=False, tokens=self.WINDOW_ALIAS_TOKENS)
      +3980
      +3981        first = self._match(TokenType.FIRST)
      +3982        if self._match_text_seq("LAST"):
      +3983            first = False
      +3984
      +3985        partition = self._parse_partition_by()
      +3986        order = self._parse_order()
      +3987        kind = self._match_set((TokenType.ROWS, TokenType.RANGE)) and self._prev.text
      +3988
      +3989        if kind:
      +3990            self._match(TokenType.BETWEEN)
      +3991            start = self._parse_window_spec()
      +3992            self._match(TokenType.AND)
      +3993            end = self._parse_window_spec()
       3994
      -3995        return self.expression(
      -3996            exp.Window,
      -3997            this=this,
      -3998            partition_by=partition,
      -3999            order=order,
      -4000            spec=spec,
      -4001            alias=window_alias,
      -4002            over=over,
      -4003            first=first,
      -4004        )
      +3995            spec = self.expression(
      +3996                exp.WindowSpec,
      +3997                kind=kind,
      +3998                start=start["value"],
      +3999                start_side=start["side"],
      +4000                end=end["value"],
      +4001                end_side=end["side"],
      +4002            )
      +4003        else:
      +4004            spec = None
       4005
      -4006    def _parse_window_spec(self) -> t.Dict[str, t.Optional[str | exp.Expression]]:
      -4007        self._match(TokenType.BETWEEN)
      -4008
      -4009        return {
      -4010            "value": (
      -4011                (self._match_text_seq("UNBOUNDED") and "UNBOUNDED")
      -4012                or (self._match_text_seq("CURRENT", "ROW") and "CURRENT ROW")
      -4013                or self._parse_bitwise()
      -4014            ),
      -4015            "side": self._match_texts(self.WINDOW_SIDES) and self._prev.text,
      -4016        }
      -4017
      -4018    def _parse_alias(
      -4019        self, this: t.Optional[exp.Expression], explicit: bool = False
      -4020    ) -> t.Optional[exp.Expression]:
      -4021        any_token = self._match(TokenType.ALIAS)
      -4022
      -4023        if explicit and not any_token:
      -4024            return this
      -4025
      -4026        if self._match(TokenType.L_PAREN):
      -4027            aliases = self.expression(
      -4028                exp.Aliases,
      -4029                this=this,
      -4030                expressions=self._parse_csv(lambda: self._parse_id_var(any_token)),
      -4031            )
      -4032            self._match_r_paren(aliases)
      -4033            return aliases
      -4034
      -4035        alias = self._parse_id_var(any_token)
      -4036
      -4037        if alias:
      -4038            return self.expression(exp.Alias, this=this, alias=alias)
      -4039
      -4040        return this
      -4041
      -4042    def _parse_id_var(
      -4043        self,
      -4044        any_token: bool = True,
      -4045        tokens: t.Optional[t.Collection[TokenType]] = None,
      -4046    ) -> t.Optional[exp.Expression]:
      -4047        identifier = self._parse_identifier()
      -4048
      -4049        if identifier:
      -4050            return identifier
      -4051
      -4052        if (any_token and self._advance_any()) or self._match_set(tokens or self.ID_VAR_TOKENS):
      -4053            quoted = self._prev.token_type == TokenType.STRING
      -4054            return exp.Identifier(this=self._prev.text, quoted=quoted)
      -4055
      -4056        return None
      -4057
      -4058    def _parse_string(self) -> t.Optional[exp.Expression]:
      -4059        if self._match(TokenType.STRING):
      -4060            return self.PRIMARY_PARSERS[TokenType.STRING](self, self._prev)
      -4061        return self._parse_placeholder()
      -4062
      -4063    def _parse_string_as_identifier(self) -> t.Optional[exp.Expression]:
      -4064        return exp.to_identifier(self._match(TokenType.STRING) and self._prev.text, quoted=True)
      -4065
      -4066    def _parse_number(self) -> t.Optional[exp.Expression]:
      -4067        if self._match(TokenType.NUMBER):
      -4068            return self.PRIMARY_PARSERS[TokenType.NUMBER](self, self._prev)
      -4069        return self._parse_placeholder()
      +4006        self._match_r_paren()
      +4007
      +4008        return self.expression(
      +4009            exp.Window,
      +4010            this=this,
      +4011            partition_by=partition,
      +4012            order=order,
      +4013            spec=spec,
      +4014            alias=window_alias,
      +4015            over=over,
      +4016            first=first,
      +4017        )
      +4018
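To make the frame handling concrete, a minimal sketch of an OVER clause with an explicit ROWS frame (default dialect assumed):

    import sqlglot
    from sqlglot import exp

    sql = (
        "SELECT ROW_NUMBER() OVER (PARTITION BY grp ORDER BY ts "
        "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM t"
    )
    window = sqlglot.parse_one(sql).find(exp.Window)
    spec = window.args["spec"]
    # Per _parse_window_spec: kind='ROWS', start='UNBOUNDED', start_side='PRECEDING',
    # end='CURRENT ROW'.
    print(spec.args["kind"], spec.args["start"], spec.args["start_side"], spec.args["end"])
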
      +4019    def _parse_window_spec(self) -> t.Dict[str, t.Optional[str | exp.Expression]]:
      +4020        self._match(TokenType.BETWEEN)
      +4021
      +4022        return {
      +4023            "value": (
      +4024                (self._match_text_seq("UNBOUNDED") and "UNBOUNDED")
      +4025                or (self._match_text_seq("CURRENT", "ROW") and "CURRENT ROW")
      +4026                or self._parse_bitwise()
      +4027            ),
      +4028            "side": self._match_texts(self.WINDOW_SIDES) and self._prev.text,
      +4029        }
      +4030
      +4031    def _parse_alias(
      +4032        self, this: t.Optional[exp.Expression], explicit: bool = False
      +4033    ) -> t.Optional[exp.Expression]:
      +4034        any_token = self._match(TokenType.ALIAS)
      +4035
      +4036        if explicit and not any_token:
      +4037            return this
      +4038
      +4039        if self._match(TokenType.L_PAREN):
      +4040            aliases = self.expression(
      +4041                exp.Aliases,
      +4042                this=this,
      +4043                expressions=self._parse_csv(lambda: self._parse_id_var(any_token)),
      +4044            )
      +4045            self._match_r_paren(aliases)
      +4046            return aliases
      +4047
      +4048        alias = self._parse_id_var(any_token)
      +4049
      +4050        if alias:
      +4051            return self.expression(exp.Alias, this=this, alias=alias)
      +4052
      +4053        return this
      +4054
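A tiny sketch of the common path through _parse_alias (default dialect assumed):

    import sqlglot
    from sqlglot import exp

    # An explicit AS (or a bare identifier after the expression) yields exp.Alias.
    select = sqlglot.parse_one("SELECT price * qty AS total FROM orders")
    alias = select.expressions[0]
    print(isinstance(alias, exp.Alias), alias.alias)  # True total
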
      +4055    def _parse_id_var(
      +4056        self,
      +4057        any_token: bool = True,
      +4058        tokens: t.Optional[t.Collection[TokenType]] = None,
      +4059    ) -> t.Optional[exp.Expression]:
      +4060        identifier = self._parse_identifier()
      +4061
      +4062        if identifier:
      +4063            return identifier
      +4064
      +4065        if (any_token and self._advance_any()) or self._match_set(tokens or self.ID_VAR_TOKENS):
      +4066            quoted = self._prev.token_type == TokenType.STRING
      +4067            return exp.Identifier(this=self._prev.text, quoted=quoted)
      +4068
      +4069        return None
       4070
      -4071    def _parse_identifier(self) -> t.Optional[exp.Expression]:
      -4072        if self._match(TokenType.IDENTIFIER):
      -4073            return self.expression(exp.Identifier, this=self._prev.text, quoted=True)
      +4071    def _parse_string(self) -> t.Optional[exp.Expression]:
      +4072        if self._match(TokenType.STRING):
      +4073            return self.PRIMARY_PARSERS[TokenType.STRING](self, self._prev)
       4074        return self._parse_placeholder()
       4075
      -4076    def _parse_var(
      -4077        self, any_token: bool = False, tokens: t.Optional[t.Collection[TokenType]] = None
      -4078    ) -> t.Optional[exp.Expression]:
      -4079        if (
      -4080            (any_token and self._advance_any())
      -4081            or self._match(TokenType.VAR)
      -4082            or (self._match_set(tokens) if tokens else False)
      -4083        ):
      -4084            return self.expression(exp.Var, this=self._prev.text)
      -4085        return self._parse_placeholder()
      -4086
      -4087    def _advance_any(self) -> t.Optional[Token]:
      -4088        if self._curr and self._curr.token_type not in self.RESERVED_KEYWORDS:
      -4089            self._advance()
      -4090            return self._prev
      -4091        return None
      -4092
      -4093    def _parse_var_or_string(self) -> t.Optional[exp.Expression]:
      -4094        return self._parse_var() or self._parse_string()
      -4095
      -4096    def _parse_null(self) -> t.Optional[exp.Expression]:
      -4097        if self._match(TokenType.NULL):
      -4098            return self.PRIMARY_PARSERS[TokenType.NULL](self, self._prev)
      -4099        return None
      -4100
      -4101    def _parse_boolean(self) -> t.Optional[exp.Expression]:
      -4102        if self._match(TokenType.TRUE):
      -4103            return self.PRIMARY_PARSERS[TokenType.TRUE](self, self._prev)
      -4104        if self._match(TokenType.FALSE):
      -4105            return self.PRIMARY_PARSERS[TokenType.FALSE](self, self._prev)
      -4106        return None
      -4107
      -4108    def _parse_star(self) -> t.Optional[exp.Expression]:
      -4109        if self._match(TokenType.STAR):
      -4110            return self.PRIMARY_PARSERS[TokenType.STAR](self, self._prev)
      -4111        return None
      -4112
      -4113    def _parse_parameter(self) -> exp.Expression:
      -4114        wrapped = self._match(TokenType.L_BRACE)
      -4115        this = self._parse_var() or self._parse_identifier() or self._parse_primary()
      -4116        self._match(TokenType.R_BRACE)
      -4117        return self.expression(exp.Parameter, this=this, wrapped=wrapped)
      -4118
      -4119    def _parse_placeholder(self) -> t.Optional[exp.Expression]:
      -4120        if self._match_set(self.PLACEHOLDER_PARSERS):
      -4121            placeholder = self.PLACEHOLDER_PARSERS[self._prev.token_type](self)
      -4122            if placeholder:
      -4123                return placeholder
      -4124            self._advance(-1)
      -4125        return None
      -4126
      -4127    def _parse_except(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
      -4128        if not self._match(TokenType.EXCEPT):
      -4129            return None
      -4130        if self._match(TokenType.L_PAREN, advance=False):
      -4131            return self._parse_wrapped_csv(self._parse_column)
      -4132        return self._parse_csv(self._parse_column)
      -4133
      -4134    def _parse_replace(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
      -4135        if not self._match(TokenType.REPLACE):
      -4136            return None
      -4137        if self._match(TokenType.L_PAREN, advance=False):
      -4138            return self._parse_wrapped_csv(self._parse_expression)
      -4139        return self._parse_csv(self._parse_expression)
      -4140
      -4141    def _parse_csv(
      -4142        self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA
      -4143    ) -> t.List[t.Optional[exp.Expression]]:
      -4144        parse_result = parse_method()
      -4145        items = [parse_result] if parse_result is not None else []
      +4076    def _parse_string_as_identifier(self) -> t.Optional[exp.Identifier]:
      +4077        return exp.to_identifier(self._match(TokenType.STRING) and self._prev.text, quoted=True)
      +4078
      +4079    def _parse_number(self) -> t.Optional[exp.Expression]:
      +4080        if self._match(TokenType.NUMBER):
      +4081            return self.PRIMARY_PARSERS[TokenType.NUMBER](self, self._prev)
      +4082        return self._parse_placeholder()
      +4083
      +4084    def _parse_identifier(self) -> t.Optional[exp.Expression]:
      +4085        if self._match(TokenType.IDENTIFIER):
      +4086            return self.expression(exp.Identifier, this=self._prev.text, quoted=True)
      +4087        return self._parse_placeholder()
      +4088
      +4089    def _parse_var(
      +4090        self, any_token: bool = False, tokens: t.Optional[t.Collection[TokenType]] = None
      +4091    ) -> t.Optional[exp.Expression]:
      +4092        if (
      +4093            (any_token and self._advance_any())
      +4094            or self._match(TokenType.VAR)
      +4095            or (self._match_set(tokens) if tokens else False)
      +4096        ):
      +4097            return self.expression(exp.Var, this=self._prev.text)
      +4098        return self._parse_placeholder()
      +4099
      +4100    def _advance_any(self) -> t.Optional[Token]:
      +4101        if self._curr and self._curr.token_type not in self.RESERVED_KEYWORDS:
      +4102            self._advance()
      +4103            return self._prev
      +4104        return None
      +4105
      +4106    def _parse_var_or_string(self) -> t.Optional[exp.Expression]:
      +4107        return self._parse_var() or self._parse_string()
      +4108
      +4109    def _parse_null(self) -> t.Optional[exp.Expression]:
      +4110        if self._match(TokenType.NULL):
      +4111            return self.PRIMARY_PARSERS[TokenType.NULL](self, self._prev)
      +4112        return None
      +4113
      +4114    def _parse_boolean(self) -> t.Optional[exp.Expression]:
      +4115        if self._match(TokenType.TRUE):
      +4116            return self.PRIMARY_PARSERS[TokenType.TRUE](self, self._prev)
      +4117        if self._match(TokenType.FALSE):
      +4118            return self.PRIMARY_PARSERS[TokenType.FALSE](self, self._prev)
      +4119        return None
      +4120
      +4121    def _parse_star(self) -> t.Optional[exp.Expression]:
      +4122        if self._match(TokenType.STAR):
      +4123            return self.PRIMARY_PARSERS[TokenType.STAR](self, self._prev)
      +4124        return None
      +4125
      +4126    def _parse_parameter(self) -> exp.Parameter:
      +4127        wrapped = self._match(TokenType.L_BRACE)
      +4128        this = self._parse_var() or self._parse_identifier() or self._parse_primary()
      +4129        self._match(TokenType.R_BRACE)
      +4130        return self.expression(exp.Parameter, this=this, wrapped=wrapped)
      +4131
      +4132    def _parse_placeholder(self) -> t.Optional[exp.Expression]:
      +4133        if self._match_set(self.PLACEHOLDER_PARSERS):
      +4134            placeholder = self.PLACEHOLDER_PARSERS[self._prev.token_type](self)
      +4135            if placeholder:
      +4136                return placeholder
      +4137            self._advance(-1)
      +4138        return None
      +4139
      +4140    def _parse_except(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
      +4141        if not self._match(TokenType.EXCEPT):
      +4142            return None
      +4143        if self._match(TokenType.L_PAREN, advance=False):
      +4144            return self._parse_wrapped_csv(self._parse_column)
      +4145        return self._parse_csv(self._parse_column)
       4146
      -4147        while self._match(sep):
      -4148            self._add_comments(parse_result)
      -4149            parse_result = parse_method()
      -4150            if parse_result is not None:
      -4151                items.append(parse_result)
      -4152
      -4153        return items
      -4154
      -4155    def _parse_tokens(
      -4156        self, parse_method: t.Callable, expressions: t.Dict
      -4157    ) -> t.Optional[exp.Expression]:
      -4158        this = parse_method()
      +4147    def _parse_replace(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
      +4148        if not self._match(TokenType.REPLACE):
      +4149            return None
      +4150        if self._match(TokenType.L_PAREN, advance=False):
      +4151            return self._parse_wrapped_csv(self._parse_expression)
      +4152        return self._parse_csv(self._parse_expression)
      +4153
      +4154    def _parse_csv(
      +4155        self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA
      +4156    ) -> t.List[t.Optional[exp.Expression]]:
      +4157        parse_result = parse_method()
      +4158        items = [parse_result] if parse_result is not None else []
       4159
      -4160        while self._match_set(expressions):
      -4161            this = self.expression(
      -4162                expressions[self._prev.token_type],
      -4163                this=this,
      -4164                comments=self._prev_comments,
      -4165                expression=parse_method(),
      -4166            )
      +4160        while self._match(sep):
      +4161            self._add_comments(parse_result)
      +4162            parse_result = parse_method()
      +4163            if parse_result is not None:
      +4164                items.append(parse_result)
      +4165
      +4166        return items
       4167
      -4168        return this
      -4169
      -4170    def _parse_wrapped_id_vars(self, optional: bool = False) -> t.List[t.Optional[exp.Expression]]:
      -4171        return self._parse_wrapped_csv(self._parse_id_var, optional=optional)
      +4168    def _parse_tokens(
      +4169        self, parse_method: t.Callable, expressions: t.Dict
      +4170    ) -> t.Optional[exp.Expression]:
      +4171        this = parse_method()
       4172
      -4173    def _parse_wrapped_csv(
      -4174        self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA, optional: bool = False
      -4175    ) -> t.List[t.Optional[exp.Expression]]:
      -4176        return self._parse_wrapped(
      -4177            lambda: self._parse_csv(parse_method, sep=sep), optional=optional
      -4178        )
      -4179
      -4180    def _parse_wrapped(self, parse_method: t.Callable, optional: bool = False) -> t.Any:
      -4181        wrapped = self._match(TokenType.L_PAREN)
      -4182        if not wrapped and not optional:
      -4183            self.raise_error("Expecting (")
      -4184        parse_result = parse_method()
      -4185        if wrapped:
      -4186            self._match_r_paren()
      -4187        return parse_result
      -4188
      -4189    def _parse_select_or_expression(self, alias: bool = False) -> t.Optional[exp.Expression]:
      -4190        return self._parse_select() or self._parse_set_operations(
      -4191            self._parse_expression() if alias else self._parse_conjunction()
      -4192        )
      -4193
      -4194    def _parse_ddl_select(self) -> t.Optional[exp.Expression]:
      -4195        return self._parse_query_modifiers(
      -4196            self._parse_set_operations(self._parse_select(nested=True, parse_subquery_alias=False))
      -4197        )
      -4198
      -4199    def _parse_transaction(self) -> exp.Expression:
      -4200        this = None
      -4201        if self._match_texts(self.TRANSACTION_KIND):
      -4202            this = self._prev.text
      -4203
      -4204        self._match_texts({"TRANSACTION", "WORK"})
      -4205
      -4206        modes = []
      -4207        while True:
      -4208            mode = []
      -4209            while self._match(TokenType.VAR):
      -4210                mode.append(self._prev.text)
      +4173        while self._match_set(expressions):
      +4174            this = self.expression(
      +4175                expressions[self._prev.token_type],
      +4176                this=this,
      +4177                comments=self._prev_comments,
      +4178                expression=parse_method(),
      +4179            )
      +4180
      +4181        return this
      +4182
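_parse_tokens is what gives binary operators their left-to-right nesting; a quick sketch (default dialect assumed):

    import sqlglot

    # 1 + 2 + 3 is folded left-associatively: Add(Add(1, 2), 3).
    expr = sqlglot.parse_one("SELECT 1 + 2 + 3").expressions[0]
    print(repr(expr))
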
      +4183    def _parse_wrapped_id_vars(self, optional: bool = False) -> t.List[t.Optional[exp.Expression]]:
      +4184        return self._parse_wrapped_csv(self._parse_id_var, optional=optional)
      +4185
      +4186    def _parse_wrapped_csv(
      +4187        self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA, optional: bool = False
      +4188    ) -> t.List[t.Optional[exp.Expression]]:
      +4189        return self._parse_wrapped(
      +4190            lambda: self._parse_csv(parse_method, sep=sep), optional=optional
      +4191        )
      +4192
      +4193    def _parse_wrapped(self, parse_method: t.Callable, optional: bool = False) -> t.Any:
      +4194        wrapped = self._match(TokenType.L_PAREN)
      +4195        if not wrapped and not optional:
      +4196            self.raise_error("Expecting (")
      +4197        parse_result = parse_method()
      +4198        if wrapped:
      +4199            self._match_r_paren()
      +4200        return parse_result
      +4201
      +4202    def _parse_select_or_expression(self, alias: bool = False) -> t.Optional[exp.Expression]:
      +4203        return self._parse_select() or self._parse_set_operations(
      +4204            self._parse_expression() if alias else self._parse_conjunction()
      +4205        )
      +4206
      +4207    def _parse_ddl_select(self) -> t.Optional[exp.Expression]:
      +4208        return self._parse_query_modifiers(
      +4209            self._parse_set_operations(self._parse_select(nested=True, parse_subquery_alias=False))
      +4210        )
       4211
      -4212            if mode:
      -4213                modes.append(" ".join(mode))
      -4214            if not self._match(TokenType.COMMA):
      -4215                break
      +4212    def _parse_transaction(self) -> exp.Transaction:
      +4213        this = None
      +4214        if self._match_texts(self.TRANSACTION_KIND):
      +4215            this = self._prev.text
       4216
      -4217        return self.expression(exp.Transaction, this=this, modes=modes)
      +4217        self._match_texts({"TRANSACTION", "WORK"})
       4218
      -4219    def _parse_commit_or_rollback(self) -> exp.Expression:
      -4220        chain = None
      -4221        savepoint = None
      -4222        is_rollback = self._prev.token_type == TokenType.ROLLBACK
      -4223
      -4224        self._match_texts({"TRANSACTION", "WORK"})
      -4225
      -4226        if self._match_text_seq("TO"):
      -4227            self._match_text_seq("SAVEPOINT")
      -4228            savepoint = self._parse_id_var()
      +4219        modes = []
      +4220        while True:
      +4221            mode = []
      +4222            while self._match(TokenType.VAR):
      +4223                mode.append(self._prev.text)
      +4224
      +4225            if mode:
      +4226                modes.append(" ".join(mode))
      +4227            if not self._match(TokenType.COMMA):
      +4228                break
       4229
      -4230        if self._match(TokenType.AND):
      -4231            chain = not self._match_text_seq("NO")
      -4232            self._match_text_seq("CHAIN")
      -4233
      -4234        if is_rollback:
      -4235            return self.expression(exp.Rollback, savepoint=savepoint)
      -4236        return self.expression(exp.Commit, chain=chain)
      -4237
      -4238    def _parse_add_column(self) -> t.Optional[exp.Expression]:
      -4239        if not self._match_text_seq("ADD"):
      -4240            return None
      -4241
      -4242        self._match(TokenType.COLUMN)
      -4243        exists_column = self._parse_exists(not_=True)
      -4244        expression = self._parse_column_def(self._parse_field(any_token=True))
      -4245
      -4246        if expression:
      -4247            expression.set("exists", exists_column)
      -4248
      -4249            # https://docs.databricks.com/delta/update-schema.html#explicitly-update-schema-to-add-columns
      -4250            if self._match_texts(("FIRST", "AFTER")):
      -4251                position = self._prev.text
      -4252                column_position = self.expression(
      -4253                    exp.ColumnPosition, this=self._parse_column(), position=position
      -4254                )
      -4255                expression.set("position", column_position)
      -4256
      -4257        return expression
      -4258
      -4259    def _parse_drop_column(self) -> t.Optional[exp.Expression]:
      -4260        drop = self._match(TokenType.DROP) and self._parse_drop()
      -4261        if drop and not isinstance(drop, exp.Command):
      -4262            drop.set("kind", drop.args.get("kind", "COLUMN"))
      -4263        return drop
      -4264
      -4265    # https://docs.aws.amazon.com/athena/latest/ug/alter-table-drop-partition.html
      -4266    def _parse_drop_partition(self, exists: t.Optional[bool] = None) -> exp.Expression:
      -4267        return self.expression(
      -4268            exp.DropPartition, expressions=self._parse_csv(self._parse_partition), exists=exists
      -4269        )
      +4230        return self.expression(exp.Transaction, this=this, modes=modes)
      +4231
      +4232    def _parse_commit_or_rollback(self) -> exp.Commit | exp.Rollback:
      +4233        chain = None
      +4234        savepoint = None
      +4235        is_rollback = self._prev.token_type == TokenType.ROLLBACK
      +4236
      +4237        self._match_texts({"TRANSACTION", "WORK"})
      +4238
      +4239        if self._match_text_seq("TO"):
      +4240            self._match_text_seq("SAVEPOINT")
      +4241            savepoint = self._parse_id_var()
      +4242
      +4243        if self._match(TokenType.AND):
      +4244            chain = not self._match_text_seq("NO")
      +4245            self._match_text_seq("CHAIN")
      +4246
      +4247        if is_rollback:
      +4248            return self.expression(exp.Rollback, savepoint=savepoint)
      +4249
      +4250        return self.expression(exp.Commit, chain=chain)
      +4251
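A small sketch of the chain and savepoint handling above (default dialect assumed):

    import sqlglot

    commit = sqlglot.parse_one("COMMIT AND CHAIN")
    rollback = sqlglot.parse_one("ROLLBACK TO SAVEPOINT sp1")
    # chain=True because NO was not matched; the savepoint name is kept on the Rollback node.
    print(commit.args.get("chain"), rollback.args.get("savepoint"))
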
      +4252    def _parse_add_column(self) -> t.Optional[exp.Expression]:
      +4253        if not self._match_text_seq("ADD"):
      +4254            return None
      +4255
      +4256        self._match(TokenType.COLUMN)
      +4257        exists_column = self._parse_exists(not_=True)
      +4258        expression = self._parse_column_def(self._parse_field(any_token=True))
      +4259
      +4260        if expression:
      +4261            expression.set("exists", exists_column)
      +4262
      +4263            # https://docs.databricks.com/delta/update-schema.html#explicitly-update-schema-to-add-columns
      +4264            if self._match_texts(("FIRST", "AFTER")):
      +4265                position = self._prev.text
      +4266                column_position = self.expression(
      +4267                    exp.ColumnPosition, this=self._parse_column(), position=position
      +4268                )
      +4269                expression.set("position", column_position)
       4270
      -4271    def _parse_add_constraint(self) -> t.Optional[exp.Expression]:
      -4272        this = None
      -4273        kind = self._prev.token_type
      -4274
      -4275        if kind == TokenType.CONSTRAINT:
      -4276            this = self._parse_id_var()
      -4277
      -4278            if self._match_text_seq("CHECK"):
      -4279                expression = self._parse_wrapped(self._parse_conjunction)
      -4280                enforced = self._match_text_seq("ENFORCED")
      -4281
      -4282                return self.expression(
      -4283                    exp.AddConstraint, this=this, expression=expression, enforced=enforced
      -4284                )
      -4285
      -4286        if kind == TokenType.FOREIGN_KEY or self._match(TokenType.FOREIGN_KEY):
      -4287            expression = self._parse_foreign_key()
      -4288        elif kind == TokenType.PRIMARY_KEY or self._match(TokenType.PRIMARY_KEY):
      -4289            expression = self._parse_primary_key()
      -4290        else:
      -4291            expression = None
      -4292
      -4293        return self.expression(exp.AddConstraint, this=this, expression=expression)
      -4294
      -4295    def _parse_alter_table_add(self) -> t.List[t.Optional[exp.Expression]]:
      -4296        index = self._index - 1
      -4297
      -4298        if self._match_set(self.ADD_CONSTRAINT_TOKENS):
      -4299            return self._parse_csv(self._parse_add_constraint)
      -4300
      -4301        self._retreat(index)
      -4302        return self._parse_csv(self._parse_add_column)
      -4303
      -4304    def _parse_alter_table_alter(self) -> exp.Expression:
      -4305        self._match(TokenType.COLUMN)
      -4306        column = self._parse_field(any_token=True)
      -4307
      -4308        if self._match_pair(TokenType.DROP, TokenType.DEFAULT):
      -4309            return self.expression(exp.AlterColumn, this=column, drop=True)
      -4310        if self._match_pair(TokenType.SET, TokenType.DEFAULT):
      -4311            return self.expression(exp.AlterColumn, this=column, default=self._parse_conjunction())
      -4312
      -4313        self._match_text_seq("SET", "DATA")
      -4314        return self.expression(
      -4315            exp.AlterColumn,
      -4316            this=column,
      -4317            dtype=self._match_text_seq("TYPE") and self._parse_types(),
      -4318            collate=self._match(TokenType.COLLATE) and self._parse_term(),
      -4319            using=self._match(TokenType.USING) and self._parse_conjunction(),
      -4320        )
      +4271        return expression
      +4272
      +4273    def _parse_drop_column(self) -> t.Optional[exp.Drop | exp.Command]:
      +4274        drop = self._match(TokenType.DROP) and self._parse_drop()
      +4275        if drop and not isinstance(drop, exp.Command):
      +4276            drop.set("kind", drop.args.get("kind", "COLUMN"))
      +4277        return drop
      +4278
      +4279    # https://docs.aws.amazon.com/athena/latest/ug/alter-table-drop-partition.html
      +4280    def _parse_drop_partition(self, exists: t.Optional[bool] = None) -> exp.DropPartition:
      +4281        return self.expression(
      +4282            exp.DropPartition, expressions=self._parse_csv(self._parse_partition), exists=exists
      +4283        )
      +4284
      +4285    def _parse_add_constraint(self) -> exp.AddConstraint:
      +4286        this = None
      +4287        kind = self._prev.token_type
      +4288
      +4289        if kind == TokenType.CONSTRAINT:
      +4290            this = self._parse_id_var()
      +4291
      +4292            if self._match_text_seq("CHECK"):
      +4293                expression = self._parse_wrapped(self._parse_conjunction)
      +4294                enforced = self._match_text_seq("ENFORCED")
      +4295
      +4296                return self.expression(
      +4297                    exp.AddConstraint, this=this, expression=expression, enforced=enforced
      +4298                )
      +4299
      +4300        if kind == TokenType.FOREIGN_KEY or self._match(TokenType.FOREIGN_KEY):
      +4301            expression = self._parse_foreign_key()
      +4302        elif kind == TokenType.PRIMARY_KEY or self._match(TokenType.PRIMARY_KEY):
      +4303            expression = self._parse_primary_key()
      +4304        else:
      +4305            expression = None
      +4306
      +4307        return self.expression(exp.AddConstraint, this=this, expression=expression)
      +4308
      +4309    def _parse_alter_table_add(self) -> t.List[t.Optional[exp.Expression]]:
      +4310        index = self._index - 1
      +4311
      +4312        if self._match_set(self.ADD_CONSTRAINT_TOKENS):
      +4313            return self._parse_csv(self._parse_add_constraint)
      +4314
      +4315        self._retreat(index)
      +4316        return self._parse_csv(self._parse_add_column)
      +4317
      +4318    def _parse_alter_table_alter(self) -> exp.AlterColumn:
      +4319        self._match(TokenType.COLUMN)
      +4320        column = self._parse_field(any_token=True)
       4321
      -4322    def _parse_alter_table_drop(self) -> t.List[t.Optional[exp.Expression]]:
      -4323        index = self._index - 1
      -4324
      -4325        partition_exists = self._parse_exists()
      -4326        if self._match(TokenType.PARTITION, advance=False):
      -4327            return self._parse_csv(lambda: self._parse_drop_partition(exists=partition_exists))
      -4328
      -4329        self._retreat(index)
      -4330        return self._parse_csv(self._parse_drop_column)
      -4331
      -4332    def _parse_alter_table_rename(self) -> exp.Expression:
      -4333        self._match_text_seq("TO")
      -4334        return self.expression(exp.RenameTable, this=self._parse_table(schema=True))
      +4322        if self._match_pair(TokenType.DROP, TokenType.DEFAULT):
      +4323            return self.expression(exp.AlterColumn, this=column, drop=True)
      +4324        if self._match_pair(TokenType.SET, TokenType.DEFAULT):
      +4325            return self.expression(exp.AlterColumn, this=column, default=self._parse_conjunction())
      +4326
      +4327        self._match_text_seq("SET", "DATA")
      +4328        return self.expression(
      +4329            exp.AlterColumn,
      +4330            this=column,
      +4331            dtype=self._match_text_seq("TYPE") and self._parse_types(),
      +4332            collate=self._match(TokenType.COLLATE) and self._parse_term(),
      +4333            using=self._match(TokenType.USING) and self._parse_conjunction(),
      +4334        )
       4335
      -4336    def _parse_alter(self) -> t.Optional[exp.Expression]:
      -4337        start = self._prev
      +4336    def _parse_alter_table_drop(self) -> t.List[t.Optional[exp.Expression]]:
      +4337        index = self._index - 1
       4338
      -4339        if not self._match(TokenType.TABLE):
      -4340            return self._parse_as_command(start)
      -4341
      -4342        exists = self._parse_exists()
      -4343        this = self._parse_table(schema=True)
      -4344
      -4345        if self._next:
      -4346            self._advance()
      -4347        parser = self.ALTER_PARSERS.get(self._prev.text.upper()) if self._prev else None
      -4348
      -4349        if parser:
      -4350            actions = ensure_list(parser(self))
      -4351
      -4352            if not self._curr:
      -4353                return self.expression(
      -4354                    exp.AlterTable,
      -4355                    this=this,
      -4356                    exists=exists,
      -4357                    actions=actions,
      -4358                )
      -4359        return self._parse_as_command(start)
      -4360
      -4361    def _parse_merge(self) -> exp.Expression:
      -4362        self._match(TokenType.INTO)
      -4363        target = self._parse_table()
      -4364
      -4365        self._match(TokenType.USING)
      -4366        using = self._parse_table()
      -4367
      -4368        self._match(TokenType.ON)
      -4369        on = self._parse_conjunction()
      -4370
      -4371        whens = []
      -4372        while self._match(TokenType.WHEN):
      -4373            matched = not self._match(TokenType.NOT)
      -4374            self._match_text_seq("MATCHED")
      -4375            source = (
      -4376                False
      -4377                if self._match_text_seq("BY", "TARGET")
      -4378                else self._match_text_seq("BY", "SOURCE")
      -4379            )
      -4380            condition = self._parse_conjunction() if self._match(TokenType.AND) else None
      +4339        partition_exists = self._parse_exists()
      +4340        if self._match(TokenType.PARTITION, advance=False):
      +4341            return self._parse_csv(lambda: self._parse_drop_partition(exists=partition_exists))
      +4342
      +4343        self._retreat(index)
      +4344        return self._parse_csv(self._parse_drop_column)
      +4345
      +4346    def _parse_alter_table_rename(self) -> exp.RenameTable:
      +4347        self._match_text_seq("TO")
      +4348        return self.expression(exp.RenameTable, this=self._parse_table(schema=True))
      +4349
      +4350    def _parse_alter(self) -> exp.AlterTable | exp.Command:
      +4351        start = self._prev
      +4352
      +4353        if not self._match(TokenType.TABLE):
      +4354            return self._parse_as_command(start)
      +4355
      +4356        exists = self._parse_exists()
      +4357        this = self._parse_table(schema=True)
      +4358
      +4359        if self._next:
      +4360            self._advance()
      +4361        parser = self.ALTER_PARSERS.get(self._prev.text.upper()) if self._prev else None
      +4362
      +4363        if parser:
      +4364            actions = ensure_list(parser(self))
      +4365
      +4366            if not self._curr:
      +4367                return self.expression(
      +4368                    exp.AlterTable,
      +4369                    this=this,
      +4370                    exists=exists,
      +4371                    actions=actions,
      +4372                )
      +4373        return self._parse_as_command(start)
      +4374
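A sketch of the ALTER TABLE dispatch above; anything the action parsers cannot fully consume falls back to exp.Command (default dialect assumed):

    import sqlglot

    alter = sqlglot.parse_one("ALTER TABLE t ADD COLUMN c INT")
    # The ADD action is routed through ALTER_PARSERS and collected in `actions`.
    print(type(alter).__name__, [type(a).__name__ for a in alter.args["actions"]])
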
      +4375    def _parse_merge(self) -> exp.Merge:
      +4376        self._match(TokenType.INTO)
      +4377        target = self._parse_table()
      +4378
      +4379        self._match(TokenType.USING)
      +4380        using = self._parse_table()
       4381
      -4382            self._match(TokenType.THEN)
      -4383
      -4384            if self._match(TokenType.INSERT):
      -4385                _this = self._parse_star()
      -4386                if _this:
      -4387                    then: t.Optional[exp.Expression] = self.expression(exp.Insert, this=_this)
      -4388                else:
      -4389                    then = self.expression(
      -4390                        exp.Insert,
      -4391                        this=self._parse_value(),
      -4392                        expression=self._match(TokenType.VALUES) and self._parse_value(),
      -4393                    )
      -4394            elif self._match(TokenType.UPDATE):
      -4395                expressions = self._parse_star()
      -4396                if expressions:
      -4397                    then = self.expression(exp.Update, expressions=expressions)
      -4398                else:
      -4399                    then = self.expression(
      -4400                        exp.Update,
      -4401                        expressions=self._match(TokenType.SET)
      -4402                        and self._parse_csv(self._parse_equality),
      -4403                    )
      -4404            elif self._match(TokenType.DELETE):
      -4405                then = self.expression(exp.Var, this=self._prev.text)
      -4406            else:
      -4407                then = None
      -4408
      -4409            whens.append(
      -4410                self.expression(
      -4411                    exp.When,
      -4412                    matched=matched,
      -4413                    source=source,
      -4414                    condition=condition,
      -4415                    then=then,
      -4416                )
      -4417            )
      -4418
      -4419        return self.expression(
      -4420            exp.Merge,
      -4421            this=target,
      -4422            using=using,
      -4423            on=on,
      -4424            expressions=whens,
      -4425        )
      -4426
      -4427    def _parse_show(self) -> t.Optional[exp.Expression]:
      -4428        parser = self._find_parser(self.SHOW_PARSERS, self._show_trie)  # type: ignore
      -4429        if parser:
      -4430            return parser(self)
      -4431        self._advance()
      -4432        return self.expression(exp.Show, this=self._prev.text.upper())
      -4433
      -4434    def _parse_set_item_assignment(
      -4435        self, kind: t.Optional[str] = None
      -4436    ) -> t.Optional[exp.Expression]:
      -4437        index = self._index
      -4438
      -4439        if kind in {"GLOBAL", "SESSION"} and self._match_text_seq("TRANSACTION"):
      -4440            return self._parse_set_transaction(global_=kind == "GLOBAL")
      -4441
      -4442        left = self._parse_primary() or self._parse_id_var()
      -4443
      -4444        if not self._match_texts(("=", "TO")):
      -4445            self._retreat(index)
      -4446            return None
      +4382        self._match(TokenType.ON)
      +4383        on = self._parse_conjunction()
      +4384
      +4385        whens = []
      +4386        while self._match(TokenType.WHEN):
      +4387            matched = not self._match(TokenType.NOT)
      +4388            self._match_text_seq("MATCHED")
      +4389            source = (
      +4390                False
      +4391                if self._match_text_seq("BY", "TARGET")
      +4392                else self._match_text_seq("BY", "SOURCE")
      +4393            )
      +4394            condition = self._parse_conjunction() if self._match(TokenType.AND) else None
      +4395
      +4396            self._match(TokenType.THEN)
      +4397
      +4398            if self._match(TokenType.INSERT):
      +4399                _this = self._parse_star()
      +4400                if _this:
      +4401                    then: t.Optional[exp.Expression] = self.expression(exp.Insert, this=_this)
      +4402                else:
      +4403                    then = self.expression(
      +4404                        exp.Insert,
      +4405                        this=self._parse_value(),
      +4406                        expression=self._match(TokenType.VALUES) and self._parse_value(),
      +4407                    )
      +4408            elif self._match(TokenType.UPDATE):
      +4409                expressions = self._parse_star()
      +4410                if expressions:
      +4411                    then = self.expression(exp.Update, expressions=expressions)
      +4412                else:
      +4413                    then = self.expression(
      +4414                        exp.Update,
      +4415                        expressions=self._match(TokenType.SET)
      +4416                        and self._parse_csv(self._parse_equality),
      +4417                    )
      +4418            elif self._match(TokenType.DELETE):
      +4419                then = self.expression(exp.Var, this=self._prev.text)
      +4420            else:
      +4421                then = None
      +4422
      +4423            whens.append(
      +4424                self.expression(
      +4425                    exp.When,
      +4426                    matched=matched,
      +4427                    source=source,
      +4428                    condition=condition,
      +4429                    then=then,
      +4430                )
      +4431            )
      +4432
      +4433        return self.expression(
      +4434            exp.Merge,
      +4435            this=target,
      +4436            using=using,
      +4437            on=on,
      +4438            expressions=whens,
      +4439        )
      +4440
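A minimal sketch of the WHEN handling above (default dialect assumed):

    import sqlglot

    merge = sqlglot.parse_one(
        "MERGE INTO t USING s ON t.id = s.id "
        "WHEN MATCHED THEN UPDATE SET t.v = s.v "
        "WHEN NOT MATCHED THEN INSERT (id, v) VALUES (s.id, s.v)"
    )
    # One exp.When per WHEN branch, carrying the matched flag and the action.
    print(len(merge.expressions), [w.args["matched"] for w in merge.expressions])
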
      +4441    def _parse_show(self) -> t.Optional[exp.Expression]:
      +4442        parser = self._find_parser(self.SHOW_PARSERS, self.SHOW_TRIE)
      +4443        if parser:
      +4444            return parser(self)
      +4445        self._advance()
      +4446        return self.expression(exp.Show, this=self._prev.text.upper())
       4447
      -4448        right = self._parse_statement() or self._parse_id_var()
      -4449        this = self.expression(
      -4450            exp.EQ,
      -4451            this=left,
      -4452            expression=right,
      -4453        )
      -4454
      -4455        return self.expression(
      -4456            exp.SetItem,
      -4457            this=this,
      -4458            kind=kind,
      -4459        )
      -4460
      -4461    def _parse_set_transaction(self, global_: bool = False) -> exp.Expression:
      -4462        self._match_text_seq("TRANSACTION")
      -4463        characteristics = self._parse_csv(
      -4464            lambda: self._parse_var_from_options(self.TRANSACTION_CHARACTERISTICS)
      -4465        )
      -4466        return self.expression(
      -4467            exp.SetItem,
      -4468            expressions=characteristics,
      -4469            kind="TRANSACTION",
      -4470            **{"global": global_},  # type: ignore
      +4448    def _parse_set_item_assignment(
      +4449        self, kind: t.Optional[str] = None
      +4450    ) -> t.Optional[exp.Expression]:
      +4451        index = self._index
      +4452
      +4453        if kind in {"GLOBAL", "SESSION"} and self._match_text_seq("TRANSACTION"):
      +4454            return self._parse_set_transaction(global_=kind == "GLOBAL")
      +4455
      +4456        left = self._parse_primary() or self._parse_id_var()
      +4457
      +4458        if not self._match_texts(("=", "TO")):
      +4459            self._retreat(index)
      +4460            return None
      +4461
      +4462        right = self._parse_statement() or self._parse_id_var()
      +4463        this = self.expression(exp.EQ, this=left, expression=right)
      +4464
      +4465        return self.expression(exp.SetItem, this=this, kind=kind)
      +4466
      +4467    def _parse_set_transaction(self, global_: bool = False) -> exp.Expression:
      +4468        self._match_text_seq("TRANSACTION")
      +4469        characteristics = self._parse_csv(
      +4470            lambda: self._parse_var_from_options(self.TRANSACTION_CHARACTERISTICS)
       4471        )
      -4472
      -4473    def _parse_set_item(self) -> t.Optional[exp.Expression]:
      -4474        parser = self._find_parser(self.SET_PARSERS, self._set_trie)  # type: ignore
      -4475        return parser(self) if parser else self._parse_set_item_assignment(kind=None)
      -4476
      -4477    def _parse_set(self) -> exp.Expression:
      -4478        index = self._index
      -4479        set_ = self.expression(exp.Set, expressions=self._parse_csv(self._parse_set_item))
      -4480
      -4481        if self._curr:
      -4482            self._retreat(index)
      -4483            return self._parse_as_command(self._prev)
      -4484
      -4485        return set_
      +4472        return self.expression(
      +4473            exp.SetItem,
      +4474            expressions=characteristics,
      +4475            kind="TRANSACTION",
      +4476            **{"global": global_},  # type: ignore
      +4477        )
      +4478
      +4479    def _parse_set_item(self) -> t.Optional[exp.Expression]:
      +4480        parser = self._find_parser(self.SET_PARSERS, self.SET_TRIE)
      +4481        return parser(self) if parser else self._parse_set_item_assignment(kind=None)
      +4482
      +4483    def _parse_set(self) -> exp.Set | exp.Command:
      +4484        index = self._index
      +4485        set_ = self.expression(exp.Set, expressions=self._parse_csv(self._parse_set_item))
       4486
      -4487    def _parse_var_from_options(self, options: t.Collection[str]) -> t.Optional[exp.Expression]:
      -4488        for option in options:
      -4489            if self._match_text_seq(*option.split(" ")):
      -4490                return exp.Var(this=option)
      -4491        return None
      +4487        if self._curr:
      +4488            self._retreat(index)
      +4489            return self._parse_as_command(self._prev)
      +4490
      +4491        return set_
       4492
      -4493    def _parse_as_command(self, start: Token) -> exp.Command:
      -4494        while self._curr:
      -4495            self._advance()
      -4496        text = self._find_sql(start, self._prev)
      -4497        size = len(start.text)
      -4498        return exp.Command(this=text[:size], expression=text[size:])
      -4499
      -4500    def _parse_dict_property(self, this: str) -> exp.DictProperty:
      -4501        settings = []
      -4502
      -4503        self._match_l_paren()
      -4504        kind = self._parse_id_var()
      +4493    def _parse_var_from_options(self, options: t.Collection[str]) -> t.Optional[exp.Var]:
      +4494        for option in options:
      +4495            if self._match_text_seq(*option.split(" ")):
      +4496                return exp.var(option)
      +4497        return None
      +4498
      +4499    def _parse_as_command(self, start: Token) -> exp.Command:
      +4500        while self._curr:
      +4501            self._advance()
      +4502        text = self._find_sql(start, self._prev)
      +4503        size = len(start.text)
      +4504        return exp.Command(this=text[:size], expression=text[size:])
       4505
      -4506        if self._match(TokenType.L_PAREN):
      -4507            while True:
      -4508                key = self._parse_id_var()
      -4509                value = self._parse_primary()
      -4510
      -4511                if not key and value is None:
      -4512                    break
      -4513                settings.append(self.expression(exp.DictSubProperty, this=key, value=value))
      -4514            self._match(TokenType.R_PAREN)
      -4515
      -4516        self._match_r_paren()
      -4517
      -4518        return self.expression(
      -4519            exp.DictProperty,
      -4520            this=this,
      -4521            kind=kind.this if kind else None,
      -4522            settings=settings,
      -4523        )
      -4524
      -4525    def _parse_dict_range(self, this: str) -> exp.DictRange:
      -4526        self._match_l_paren()
      -4527        has_min = self._match_text_seq("MIN")
      -4528        if has_min:
      -4529            min = self._parse_var() or self._parse_primary()
      -4530            self._match_text_seq("MAX")
      -4531            max = self._parse_var() or self._parse_primary()
      -4532        else:
      -4533            max = self._parse_var() or self._parse_primary()
      -4534            min = exp.Literal.number(0)
      -4535        self._match_r_paren()
      -4536        return self.expression(exp.DictRange, this=this, min=min, max=max)
      -4537
      -4538    def _find_parser(
      -4539        self, parsers: t.Dict[str, t.Callable], trie: t.Dict
      -4540    ) -> t.Optional[t.Callable]:
      -4541        if not self._curr:
      -4542            return None
      +4506    def _parse_dict_property(self, this: str) -> exp.DictProperty:
      +4507        settings = []
      +4508
      +4509        self._match_l_paren()
      +4510        kind = self._parse_id_var()
      +4511
      +4512        if self._match(TokenType.L_PAREN):
      +4513            while True:
      +4514                key = self._parse_id_var()
      +4515                value = self._parse_primary()
      +4516
      +4517                if not key and value is None:
      +4518                    break
      +4519                settings.append(self.expression(exp.DictSubProperty, this=key, value=value))
      +4520            self._match(TokenType.R_PAREN)
      +4521
      +4522        self._match_r_paren()
      +4523
      +4524        return self.expression(
      +4525            exp.DictProperty,
      +4526            this=this,
      +4527            kind=kind.this if kind else None,
      +4528            settings=settings,
      +4529        )
      +4530
      +4531    def _parse_dict_range(self, this: str) -> exp.DictRange:
      +4532        self._match_l_paren()
      +4533        has_min = self._match_text_seq("MIN")
      +4534        if has_min:
      +4535            min = self._parse_var() or self._parse_primary()
      +4536            self._match_text_seq("MAX")
      +4537            max = self._parse_var() or self._parse_primary()
      +4538        else:
      +4539            max = self._parse_var() or self._parse_primary()
      +4540            min = exp.Literal.number(0)
      +4541        self._match_r_paren()
      +4542        return self.expression(exp.DictRange, this=this, min=min, max=max)
       4543
      -4544        index = self._index
      -4545        this = []
      -4546        while True:
      -4547            # The current token might be multiple words
      -4548            curr = self._curr.text.upper()
      -4549            key = curr.split(" ")
      -4550            this.append(curr)
      -4551            self._advance()
      -4552            result, trie = in_trie(trie, key)
      -4553            if result == 0:
      -4554                break
      -4555            if result == 2:
      -4556                subparser = parsers[" ".join(this)]
      -4557                return subparser
      -4558        self._retreat(index)
      -4559        return None
      -4560
      -4561    def _match(self, token_type, advance=True, expression=None):
      -4562        if not self._curr:
      -4563            return None
      -4564
      -4565        if self._curr.token_type == token_type:
      -4566            if advance:
      -4567                self._advance()
      -4568            self._add_comments(expression)
      -4569            return True
      +4544    def _find_parser(
      +4545        self, parsers: t.Dict[str, t.Callable], trie: t.Dict
      +4546    ) -> t.Optional[t.Callable]:
      +4547        if not self._curr:
      +4548            return None
      +4549
      +4550        index = self._index
      +4551        this = []
      +4552        while True:
      +4553            # The current token might be multiple words
      +4554            curr = self._curr.text.upper()
      +4555            key = curr.split(" ")
      +4556            this.append(curr)
      +4557            self._advance()
      +4558            result, trie = in_trie(trie, key)
      +4559            if result == 0:
      +4560                break
      +4561            if result == 2:
      +4562                subparser = parsers[" ".join(this)]
      +4563                return subparser
      +4564        self._retreat(index)
      +4565        return None
      +4566
      +4567    def _match(self, token_type, advance=True, expression=None):
      +4568        if not self._curr:
      +4569            return None
       4570
      -4571        return None
      -4572
      -4573    def _match_set(self, types, advance=True):
      -4574        if not self._curr:
      -4575            return None
      +4571        if self._curr.token_type == token_type:
      +4572            if advance:
      +4573                self._advance()
      +4574            self._add_comments(expression)
      +4575            return True
       4576
      -4577        if self._curr.token_type in types:
      -4578            if advance:
      -4579                self._advance()
      -4580            return True
      -4581
      -4582        return None
      -4583
      -4584    def _match_pair(self, token_type_a, token_type_b, advance=True):
      -4585        if not self._curr or not self._next:
      -4586            return None
      +4577        return None
      +4578
      +4579    def _match_set(self, types, advance=True):
      +4580        if not self._curr:
      +4581            return None
      +4582
      +4583        if self._curr.token_type in types:
      +4584            if advance:
      +4585                self._advance()
      +4586            return True
       4587
      -4588        if self._curr.token_type == token_type_a and self._next.token_type == token_type_b:
      -4589            if advance:
      -4590                self._advance(2)
      -4591            return True
      -4592
      -4593        return None
      -4594
      -4595    def _match_l_paren(self, expression: t.Optional[exp.Expression] = None) -> None:
      -4596        if not self._match(TokenType.L_PAREN, expression=expression):
      -4597            self.raise_error("Expecting (")
      +4588        return None
      +4589
      +4590    def _match_pair(self, token_type_a, token_type_b, advance=True):
      +4591        if not self._curr or not self._next:
      +4592            return None
      +4593
      +4594        if self._curr.token_type == token_type_a and self._next.token_type == token_type_b:
      +4595            if advance:
      +4596                self._advance(2)
      +4597            return True
       4598
      -4599    def _match_r_paren(self, expression: t.Optional[exp.Expression] = None) -> None:
      -4600        if not self._match(TokenType.R_PAREN, expression=expression):
      -4601            self.raise_error("Expecting )")
      -4602
      -4603    def _match_texts(self, texts, advance=True):
      -4604        if self._curr and self._curr.text.upper() in texts:
      -4605            if advance:
      -4606                self._advance()
      -4607            return True
      -4608        return False
      -4609
      -4610    def _match_text_seq(self, *texts, advance=True):
      -4611        index = self._index
      -4612        for text in texts:
      -4613            if self._curr and self._curr.text.upper() == text:
      -4614                self._advance()
      -4615            else:
      -4616                self._retreat(index)
      -4617                return False
      -4618
      -4619        if not advance:
      -4620            self._retreat(index)
      -4621
      -4622        return True
      -4623
      -4624    @t.overload
      -4625    def _replace_columns_with_dots(self, this: exp.Expression) -> exp.Expression:
      -4626        ...
      +4599        return None
      +4600
      +4601    def _match_l_paren(self, expression: t.Optional[exp.Expression] = None) -> None:
      +4602        if not self._match(TokenType.L_PAREN, expression=expression):
      +4603            self.raise_error("Expecting (")
      +4604
      +4605    def _match_r_paren(self, expression: t.Optional[exp.Expression] = None) -> None:
      +4606        if not self._match(TokenType.R_PAREN, expression=expression):
      +4607            self.raise_error("Expecting )")
      +4608
      +4609    def _match_texts(self, texts, advance=True):
      +4610        if self._curr and self._curr.text.upper() in texts:
      +4611            if advance:
      +4612                self._advance()
      +4613            return True
      +4614        return False
      +4615
      +4616    def _match_text_seq(self, *texts, advance=True):
      +4617        index = self._index
      +4618        for text in texts:
      +4619            if self._curr and self._curr.text.upper() == text:
      +4620                self._advance()
      +4621            else:
      +4622                self._retreat(index)
      +4623                return False
      +4624
      +4625        if not advance:
      +4626            self._retreat(index)
       4627
      -4628    @t.overload
      -4629    def _replace_columns_with_dots(
      -4630        self, this: t.Optional[exp.Expression]
      -4631    ) -> t.Optional[exp.Expression]:
      +4628        return True
      +4629
      +4630    @t.overload
      +4631    def _replace_columns_with_dots(self, this: exp.Expression) -> exp.Expression:
       4632        ...
       4633
      -4634    def _replace_columns_with_dots(self, this):
      -4635        if isinstance(this, exp.Dot):
      -4636            exp.replace_children(this, self._replace_columns_with_dots)
      -4637        elif isinstance(this, exp.Column):
      -4638            exp.replace_children(this, self._replace_columns_with_dots)
      -4639            table = this.args.get("table")
      -4640            this = (
      -4641                self.expression(exp.Dot, this=table, expression=this.this)
      -4642                if table
      -4643                else self.expression(exp.Var, this=this.name)
      -4644            )
      -4645        elif isinstance(this, exp.Identifier):
      -4646            this = self.expression(exp.Var, this=this.name)
      -4647
      -4648        return this
      -4649
      -4650    def _replace_lambda(
      -4651        self, node: t.Optional[exp.Expression], lambda_variables: t.Set[str]
      -4652    ) -> t.Optional[exp.Expression]:
      -4653        if not node:
      -4654            return node
      +4634    @t.overload
      +4635    def _replace_columns_with_dots(
      +4636        self, this: t.Optional[exp.Expression]
      +4637    ) -> t.Optional[exp.Expression]:
      +4638        ...
      +4639
      +4640    def _replace_columns_with_dots(self, this):
      +4641        if isinstance(this, exp.Dot):
      +4642            exp.replace_children(this, self._replace_columns_with_dots)
      +4643        elif isinstance(this, exp.Column):
      +4644            exp.replace_children(this, self._replace_columns_with_dots)
      +4645            table = this.args.get("table")
      +4646            this = (
      +4647                self.expression(exp.Dot, this=table, expression=this.this)
      +4648                if table
      +4649                else self.expression(exp.Var, this=this.name)
      +4650            )
      +4651        elif isinstance(this, exp.Identifier):
      +4652            this = self.expression(exp.Var, this=this.name)
      +4653
      +4654        return this
       4655
      -4656        for column in node.find_all(exp.Column):
      -4657            if column.parts[0].name in lambda_variables:
      -4658                dot_or_id = column.to_dot() if column.table else column.this
      -4659                parent = column.parent
      -4660
      -4661                while isinstance(parent, exp.Dot):
      -4662                    if not isinstance(parent.parent, exp.Dot):
      -4663                        parent.replace(dot_or_id)
      -4664                        break
      -4665                    parent = parent.parent
      -4666                else:
      -4667                    if column is node:
      -4668                        node = dot_or_id
      -4669                    else:
      -4670                        column.replace(dot_or_id)
      -4671        return node
      +4656    def _replace_lambda(
      +4657        self, node: t.Optional[exp.Expression], lambda_variables: t.Set[str]
      +4658    ) -> t.Optional[exp.Expression]:
      +4659        if not node:
      +4660            return node
      +4661
      +4662        for column in node.find_all(exp.Column):
      +4663            if column.parts[0].name in lambda_variables:
      +4664                dot_or_id = column.to_dot() if column.table else column.this
      +4665                parent = column.parent
      +4666
      +4667                while isinstance(parent, exp.Dot):
      +4668                    if not isinstance(parent.parent, exp.Dot):
      +4669                        parent.replace(dot_or_id)
      +4670                        break
      +4671                    parent = parent.parent
      +4672                else:
      +4673                    if column is node:
      +4674                        node = dot_or_id
      +4675                    else:
      +4676                        column.replace(dot_or_id)
      +4677        return node
       
-Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces
-a parsed syntax tree.
+Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
-• error_level: the desired error level.
+• error_level: The desired error level.
  Default: ErrorLevel.IMMEDIATE
-• error_message_context: determines the amount of context to capture from a
+• error_message_context: Determines the amount of context to capture from a
  query string when displaying the error message (in number of characters).
-  Default: 50.
+  Default: 100
-• index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.
-  Default: 0
-• alias_post_tablesample: If the table alias comes after tablesample.
-  Default: False
• max_errors: Maximum number of error messages to include in a raised ParseError.
  This is only relevant if error_level is ErrorLevel.RAISE.
  Default: 3
-• null_ordering: Indicates the default null ordering method to use if not explicitly set.
-  Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
-  Default: "nulls_are_small"
      @@ -9497,30 +9499,22 @@ Default: "nulls_are_small"
- Parser( error_level: Optional[sqlglot.errors.ErrorLevel] = None, error_message_context: int = 100, index_offset: int = 0, unnest_column_only: bool = False, alias_post_tablesample: bool = False, max_errors: int = 3, null_ordering: Optional[str] = None)
+ Parser( error_level: Optional[sqlglot.errors.ErrorLevel] = None, error_message_context: int = 100, max_errors: int = 3)
      -
      810    def __init__(
      -811        self,
      -812        error_level: t.Optional[ErrorLevel] = None,
      -813        error_message_context: int = 100,
      -814        index_offset: int = 0,
      -815        unnest_column_only: bool = False,
      -816        alias_post_tablesample: bool = False,
      -817        max_errors: int = 3,
      -818        null_ordering: t.Optional[str] = None,
      -819    ):
      -820        self.error_level = error_level or ErrorLevel.IMMEDIATE
      -821        self.error_message_context = error_message_context
      -822        self.index_offset = index_offset
      -823        self.unnest_column_only = unnest_column_only
      -824        self.alias_post_tablesample = alias_post_tablesample
      -825        self.max_errors = max_errors
      -826        self.null_ordering = null_ordering
      -827        self.reset()
      +            
      829    def __init__(
      +830        self,
      +831        error_level: t.Optional[ErrorLevel] = None,
      +832        error_message_context: int = 100,
      +833        max_errors: int = 3,
      +834    ):
      +835        self.error_level = error_level or ErrorLevel.IMMEDIATE
      +836        self.error_message_context = error_message_context
      +837        self.max_errors = max_errors
      +838        self.reset()
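A minimal usage sketch of the slimmed-down constructor (illustrative only; it assumes the public sqlglot.parser.Parser and sqlglot.errors.ErrorLevel imports and is not part of the patch):

    # Sketch: collect parse errors and raise them together at the end, instead of
    # raising on the first error (ErrorLevel.IMMEDIATE is the default).
    from sqlglot.errors import ErrorLevel
    from sqlglot.parser import Parser

    parser = Parser(
        error_level=ErrorLevel.RAISE,  # defer raising until check_errors()
        error_message_context=100,     # characters of query context in messages
        max_errors=3,                  # cap on messages merged into the ParseError
    )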
       
      @@ -9538,15 +9532,15 @@ Default: "nulls_are_small"
      -
      829    def reset(self):
      -830        self.sql = ""
      -831        self.errors = []
      -832        self._tokens = []
      -833        self._index = 0
      -834        self._curr = None
      -835        self._next = None
      -836        self._prev = None
      -837        self._prev_comments = None
      +            
      840    def reset(self):
      +841        self.sql = ""
      +842        self.errors = []
      +843        self._tokens = []
      +844        self._index = 0
      +845        self._curr = None
      +846        self._next = None
      +847        self._prev = None
      +848        self._prev_comments = None
       
      @@ -9564,23 +9558,23 @@ Default: "nulls_are_small"
      -
      839    def parse(
      -840        self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
      -841    ) -> t.List[t.Optional[exp.Expression]]:
      -842        """
      -843        Parses a list of tokens and returns a list of syntax trees, one tree
      -844        per parsed SQL statement.
      -845
      -846        Args:
      -847            raw_tokens: the list of tokens.
      -848            sql: the original SQL string, used to produce helpful debug messages.
      -849
      -850        Returns:
      -851            The list of syntax trees.
      -852        """
      -853        return self._parse(
      -854            parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
      -855        )
      +            
      850    def parse(
      +851        self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
      +852    ) -> t.List[t.Optional[exp.Expression]]:
      +853        """
      +854        Parses a list of tokens and returns a list of syntax trees, one tree
      +855        per parsed SQL statement.
      +856
      +857        Args:
      +858            raw_tokens: The list of tokens.
      +859            sql: The original SQL string, used to produce helpful debug messages.
      +860
      +861        Returns:
      +862            The list of the produced syntax trees.
      +863        """
      +864        return self._parse(
      +865            parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
      +866        )
       
      @@ -9590,14 +9584,14 @@ per parsed SQL statement.

Arguments:
-• raw_tokens: the list of tokens.
-• sql: the original SQL string, used to produce helpful debug messages.
+• raw_tokens: The list of tokens.
+• sql: The original SQL string, used to produce helpful debug messages.

Returns:
-The list of syntax trees.
+The list of the produced syntax trees.
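A short, hedged example of the parse() entry point, pairing the parser with the Tokenizer that produces its input (names assumed from sqlglot's public modules):

    # Sketch: tokenize two statements and parse them into one syntax tree each.
    from sqlglot.parser import Parser
    from sqlglot.tokens import Tokenizer

    sql = "SELECT a FROM t; SELECT b FROM u"
    tokens = Tokenizer().tokenize(sql)
    trees = Parser().parse(tokens, sql)  # passing sql improves error messages
    assert len(trees) == 2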

      @@ -9614,39 +9608,41 @@ per parsed SQL statement.

      -
      857    def parse_into(
      -858        self,
      -859        expression_types: exp.IntoType,
      -860        raw_tokens: t.List[Token],
      -861        sql: t.Optional[str] = None,
      -862    ) -> t.List[t.Optional[exp.Expression]]:
      -863        """
      -864        Parses a list of tokens into a given Expression type. If a collection of Expression
      -865        types is given instead, this method will try to parse the token list into each one
      -866        of them, stopping at the first for which the parsing succeeds.
      -867
      -868        Args:
      -869            expression_types: the expression type(s) to try and parse the token list into.
      -870            raw_tokens: the list of tokens.
      -871            sql: the original SQL string, used to produce helpful debug messages.
      -872
      -873        Returns:
      -874            The target Expression.
      -875        """
      -876        errors = []
      -877        for expression_type in ensure_collection(expression_types):
      -878            parser = self.EXPRESSION_PARSERS.get(expression_type)
      -879            if not parser:
      -880                raise TypeError(f"No parser registered for {expression_type}")
      -881            try:
      -882                return self._parse(parser, raw_tokens, sql)
      -883            except ParseError as e:
      -884                e.errors[0]["into_expression"] = expression_type
      -885                errors.append(e)
      -886        raise ParseError(
      -887            f"Failed to parse '{sql or raw_tokens}' into {expression_types}",
      -888            errors=merge_errors(errors),
      -889        ) from errors[-1]
      +            
      868    def parse_into(
      +869        self,
      +870        expression_types: exp.IntoType,
      +871        raw_tokens: t.List[Token],
      +872        sql: t.Optional[str] = None,
      +873    ) -> t.List[t.Optional[exp.Expression]]:
      +874        """
      +875        Parses a list of tokens into a given Expression type. If a collection of Expression
      +876        types is given instead, this method will try to parse the token list into each one
      +877        of them, stopping at the first for which the parsing succeeds.
      +878
      +879        Args:
      +880            expression_types: The expression type(s) to try and parse the token list into.
      +881            raw_tokens: The list of tokens.
      +882            sql: The original SQL string, used to produce helpful debug messages.
      +883
      +884        Returns:
      +885            The target Expression.
      +886        """
      +887        errors = []
      +888        for expression_type in ensure_list(expression_types):
      +889            parser = self.EXPRESSION_PARSERS.get(expression_type)
      +890            if not parser:
      +891                raise TypeError(f"No parser registered for {expression_type}")
      +892
      +893            try:
      +894                return self._parse(parser, raw_tokens, sql)
      +895            except ParseError as e:
      +896                e.errors[0]["into_expression"] = expression_type
      +897                errors.append(e)
      +898
      +899        raise ParseError(
      +900            f"Failed to parse '{sql or raw_tokens}' into {expression_types}",
      +901            errors=merge_errors(errors),
      +902        ) from errors[-1]
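A hedged sketch of parse_into(); exp.Table is used here because the schema helpers later in this patch parse into it via exp.maybe_parse, so it is assumed to have a registered entry in EXPRESSION_PARSERS:

    # Sketch: force the token stream to be parsed as a table reference.
    from sqlglot import exp
    from sqlglot.parser import Parser
    from sqlglot.tokens import Tokenizer

    sql = "db.some_table"
    tokens = Tokenizer().tokenize(sql)
    table = Parser().parse_into(exp.Table, tokens, sql)[0]  # an exp.Table instance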
       
      @@ -9657,9 +9653,9 @@ of them, stopping at the first for which the parsing succeeds.

Arguments:
-• expression_types: the expression type(s) to try and parse the token list into.
-• raw_tokens: the list of tokens.
-• sql: the original SQL string, used to produce helpful debug messages.
+• expression_types: The expression type(s) to try and parse the token list into.
+• raw_tokens: The list of tokens.
+• sql: The original SQL string, used to produce helpful debug messages.
      Returns:
      @@ -9682,18 +9678,16 @@ of them, stopping at the first for which the parsing succeeds.

      -
      925    def check_errors(self) -> None:
      -926        """
      -927        Logs or raises any found errors, depending on the chosen error level setting.
      -928        """
      -929        if self.error_level == ErrorLevel.WARN:
      -930            for error in self.errors:
      -931                logger.error(str(error))
      -932        elif self.error_level == ErrorLevel.RAISE and self.errors:
      -933            raise ParseError(
      -934                concat_messages(self.errors, self.max_errors),
      -935                errors=merge_errors(self.errors),
      -936            )
      +            
      939    def check_errors(self) -> None:
      +940        """Logs or raises any found errors, depending on the chosen error level setting."""
      +941        if self.error_level == ErrorLevel.WARN:
      +942            for error in self.errors:
      +943                logger.error(str(error))
      +944        elif self.error_level == ErrorLevel.RAISE and self.errors:
      +945            raise ParseError(
      +946                concat_messages(self.errors, self.max_errors),
      +947                errors=merge_errors(self.errors),
      +948            )
       
      @@ -9713,33 +9707,33 @@ of them, stopping at the first for which the parsing succeeds.

      -
      938    def raise_error(self, message: str, token: t.Optional[Token] = None) -> None:
      -939        """
      -940        Appends an error in the list of recorded errors or raises it, depending on the chosen
      -941        error level setting.
      -942        """
      -943        token = token or self._curr or self._prev or Token.string("")
      -944        start = token.start
      -945        end = token.end + 1
      -946        start_context = self.sql[max(start - self.error_message_context, 0) : start]
      -947        highlight = self.sql[start:end]
      -948        end_context = self.sql[end : end + self.error_message_context]
      -949
      -950        error = ParseError.new(
      -951            f"{message}. Line {token.line}, Col: {token.col}.\n"
      -952            f"  {start_context}\033[4m{highlight}\033[0m{end_context}",
      -953            description=message,
      -954            line=token.line,
      -955            col=token.col,
      -956            start_context=start_context,
      -957            highlight=highlight,
      -958            end_context=end_context,
      -959        )
      -960
      -961        if self.error_level == ErrorLevel.IMMEDIATE:
      -962            raise error
      -963
      -964        self.errors.append(error)
      +            
      950    def raise_error(self, message: str, token: t.Optional[Token] = None) -> None:
      +951        """
      +952        Appends an error in the list of recorded errors or raises it, depending on the chosen
      +953        error level setting.
      +954        """
      +955        token = token or self._curr or self._prev or Token.string("")
      +956        start = token.start
      +957        end = token.end + 1
      +958        start_context = self.sql[max(start - self.error_message_context, 0) : start]
      +959        highlight = self.sql[start:end]
      +960        end_context = self.sql[end : end + self.error_message_context]
      +961
      +962        error = ParseError.new(
      +963            f"{message}. Line {token.line}, Col: {token.col}.\n"
      +964            f"  {start_context}\033[4m{highlight}\033[0m{end_context}",
      +965            description=message,
      +966            line=token.line,
      +967            col=token.col,
      +968            start_context=start_context,
      +969            highlight=highlight,
      +970            end_context=end_context,
      +971        )
      +972
      +973        if self.error_level == ErrorLevel.IMMEDIATE:
      +974            raise error
      +975
      +976        self.errors.append(error)
       
      @@ -9760,24 +9754,23 @@ error level setting.

      -
      966    def expression(
      -967        self, exp_class: t.Type[E], comments: t.Optional[t.List[str]] = None, **kwargs
      -968    ) -> E:
      -969        """
      -970        Creates a new, validated Expression.
      -971
      -972        Args:
      -973            exp_class: the expression class to instantiate.
      -974            comments: an optional list of comments to attach to the expression.
      -975            kwargs: the arguments to set for the expression along with their respective values.
      -976
      -977        Returns:
      -978            The target expression.
      -979        """
      -980        instance = exp_class(**kwargs)
      -981        instance.add_comments(comments) if comments else self._add_comments(instance)
      -982        self.validate_expression(instance)
      -983        return instance
      +            
      978    def expression(
      +979        self, exp_class: t.Type[E], comments: t.Optional[t.List[str]] = None, **kwargs
      +980    ) -> E:
      +981        """
      +982        Creates a new, validated Expression.
      +983
      +984        Args:
      +985            exp_class: The expression class to instantiate.
      +986            comments: An optional list of comments to attach to the expression.
      +987            kwargs: The arguments to set for the expression along with their respective values.
      +988
      +989        Returns:
      +990            The target expression.
      +991        """
      +992        instance = exp_class(**kwargs)
      +993        instance.add_comments(comments) if comments else self._add_comments(instance)
      +994        return self.validate_expression(instance)
       
      @@ -9786,9 +9779,9 @@ error level setting.

Arguments:
-• exp_class: the expression class to instantiate.
-• comments: an optional list of comments to attach to the expression.
-• kwargs: the arguments to set for the expression along with their respective values.
+• exp_class: The expression class to instantiate.
+• comments: An optional list of comments to attach to the expression.
+• kwargs: The arguments to set for the expression along with their respective values.
      Returns:
      @@ -9805,40 +9798,45 @@ error level setting.

def
- validate_expression( self, expression: sqlglot.expressions.Expression, args: Optional[List] = None) -> None:
+ validate_expression(self, expression: ~E, args: Optional[List] = None) -> ~E:
      -
       990    def validate_expression(
      - 991        self, expression: exp.Expression, args: t.Optional[t.List] = None
      - 992    ) -> None:
      - 993        """
      - 994        Validates an already instantiated expression, making sure that all its mandatory arguments
      - 995        are set.
      - 996
      - 997        Args:
      - 998            expression: the expression to validate.
      - 999            args: an optional list of items that was used to instantiate the expression, if it's a Func.
      -1000        """
      -1001        if self.error_level == ErrorLevel.IGNORE:
      -1002            return
      -1003
      -1004        for error_message in expression.error_messages(args):
      -1005            self.raise_error(error_message)
      +            
      1001    def validate_expression(self, expression: E, args: t.Optional[t.List] = None) -> E:
      +1002        """
      +1003        Validates an Expression, making sure that all its mandatory arguments are set.
      +1004
      +1005        Args:
      +1006            expression: The expression to validate.
      +1007            args: An optional list of items that was used to instantiate the expression, if it's a Func.
      +1008
      +1009        Returns:
      +1010            The validated expression.
      +1011        """
      +1012        if self.error_level != ErrorLevel.IGNORE:
      +1013            for error_message in expression.error_messages(args):
      +1014                self.raise_error(error_message)
      +1015
      +1016        return expression
       
-Validates an already instantiated expression, making sure that all its mandatory arguments
-are set.
+Validates an Expression, making sure that all its mandatory arguments are set.

Arguments:
-• expression: the expression to validate.
-• args: an optional list of items that was used to instantiate the expression, if it's a Func.
+• expression: The expression to validate.
+• args: An optional list of items that was used to instantiate the expression, if it's a Func.

+Returns:
+
+The validated expression.
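Since validate_expression() now returns its argument, callers such as expression() can validate and return in one step; a hedged sketch of the same pattern used directly (the EQ node built here is a placeholder):

    # Sketch: validate_expression() hands back the node it validated, so it can be chained.
    from sqlglot import exp
    from sqlglot.parser import Parser

    node = Parser().validate_expression(
        exp.EQ(this=exp.column("x"), expression=exp.Literal.number(1))
    )
    assert isinstance(node, exp.EQ)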
      diff --git a/docs/sqlglot/schema.html b/docs/sqlglot/schema.html index 8bcaf2f..d3a0fbe 100644 --- a/docs/sqlglot/schema.html +++ b/docs/sqlglot/schema.html @@ -418,198 +418,196 @@ 285 elif isinstance(column_type, str): 286 return self._to_data_type(column_type.upper(), dialect=dialect) 287 -288 raise SchemaError(f"Unknown column type '{column_type}'") +288 return exp.DataType.build("unknown") 289 -290 return exp.DataType.build("unknown") -291 -292 def _normalize(self, schema: t.Dict) -> t.Dict: -293 """ -294 Converts all identifiers in the schema into lowercase, unless they're quoted. -295 -296 Args: -297 schema: the schema to normalize. -298 -299 Returns: -300 The normalized schema mapping. -301 """ -302 flattened_schema = flatten_schema(schema, depth=dict_depth(schema) - 1) -303 -304 normalized_mapping: t.Dict = {} -305 for keys in flattened_schema: -306 columns = nested_get(schema, *zip(keys, keys)) -307 assert columns is not None -308 -309 normalized_keys = [self._normalize_name(key, dialect=self.dialect) for key in keys] -310 for column_name, column_type in columns.items(): -311 nested_set( -312 normalized_mapping, -313 normalized_keys + [self._normalize_name(column_name, dialect=self.dialect)], -314 column_type, -315 ) +290 def _normalize(self, schema: t.Dict) -> t.Dict: +291 """ +292 Converts all identifiers in the schema into lowercase, unless they're quoted. +293 +294 Args: +295 schema: the schema to normalize. +296 +297 Returns: +298 The normalized schema mapping. +299 """ +300 flattened_schema = flatten_schema(schema, depth=dict_depth(schema) - 1) +301 +302 normalized_mapping: t.Dict = {} +303 for keys in flattened_schema: +304 columns = nested_get(schema, *zip(keys, keys)) +305 assert columns is not None +306 +307 normalized_keys = [self._normalize_name(key, dialect=self.dialect) for key in keys] +308 for column_name, column_type in columns.items(): +309 nested_set( +310 normalized_mapping, +311 normalized_keys + [self._normalize_name(column_name, dialect=self.dialect)], +312 column_type, +313 ) +314 +315 return normalized_mapping 316 -317 return normalized_mapping -318 -319 def _normalize_table(self, table: exp.Table, dialect: DialectType = None) -> exp.Table: -320 normalized_table = table.copy() -321 -322 for arg in TABLE_ARGS: -323 value = normalized_table.args.get(arg) -324 if isinstance(value, (str, exp.Identifier)): -325 normalized_table.set( -326 arg, exp.to_identifier(self._normalize_name(value, dialect=dialect)) -327 ) +317 def _normalize_table(self, table: exp.Table, dialect: DialectType = None) -> exp.Table: +318 normalized_table = table.copy() +319 +320 for arg in TABLE_ARGS: +321 value = normalized_table.args.get(arg) +322 if isinstance(value, (str, exp.Identifier)): +323 normalized_table.set( +324 arg, exp.to_identifier(self._normalize_name(value, dialect=dialect)) +325 ) +326 +327 return normalized_table 328 -329 return normalized_table -330 -331 def _normalize_name(self, name: str | exp.Identifier, dialect: DialectType = None) -> str: -332 dialect = dialect or self.dialect -333 -334 try: -335 identifier = sqlglot.maybe_parse(name, dialect=dialect, into=exp.Identifier) -336 except ParseError: -337 return name if isinstance(name, str) else name.name +329 def _normalize_name(self, name: str | exp.Identifier, dialect: DialectType = None) -> str: +330 dialect = dialect or self.dialect +331 +332 try: +333 identifier = sqlglot.maybe_parse(name, dialect=dialect, into=exp.Identifier) +334 except ParseError: +335 return name if isinstance(name, str) else name.name 
+336 +337 name = identifier.name 338 -339 name = identifier.name -340 -341 if not self.normalize or identifier.quoted: -342 return name +339 if not self.normalize or identifier.quoted: +340 return name +341 +342 return name.upper() if dialect in RESOLVES_IDENTIFIERS_AS_UPPERCASE else name.lower() 343 -344 return name.upper() if dialect in RESOLVES_IDENTIFIERS_AS_UPPERCASE else name.lower() -345 -346 def _depth(self) -> int: -347 # The columns themselves are a mapping, but we don't want to include those -348 return super()._depth() - 1 -349 -350 def _ensure_table(self, table: exp.Table | str, dialect: DialectType = None) -> exp.Table: -351 return exp.maybe_parse(table, into=exp.Table, dialect=dialect or self.dialect) -352 -353 def _to_data_type(self, schema_type: str, dialect: DialectType = None) -> exp.DataType: -354 """ -355 Convert a type represented as a string to the corresponding `sqlglot.exp.DataType` object. -356 -357 Args: -358 schema_type: the type we want to convert. -359 dialect: the SQL dialect that will be used to parse `schema_type`, if needed. -360 -361 Returns: -362 The resulting expression type. -363 """ -364 if schema_type not in self._type_mapping_cache: -365 dialect = dialect or self.dialect -366 -367 try: -368 expression = exp.DataType.build(schema_type, dialect=dialect) -369 self._type_mapping_cache[schema_type] = expression -370 except AttributeError: -371 in_dialect = f" in dialect {dialect}" if dialect else "" -372 raise SchemaError(f"Failed to build type '{schema_type}'{in_dialect}.") +344 def _depth(self) -> int: +345 # The columns themselves are a mapping, but we don't want to include those +346 return super()._depth() - 1 +347 +348 def _ensure_table(self, table: exp.Table | str, dialect: DialectType = None) -> exp.Table: +349 return exp.maybe_parse(table, into=exp.Table, dialect=dialect or self.dialect) +350 +351 def _to_data_type(self, schema_type: str, dialect: DialectType = None) -> exp.DataType: +352 """ +353 Convert a type represented as a string to the corresponding `sqlglot.exp.DataType` object. +354 +355 Args: +356 schema_type: the type we want to convert. +357 dialect: the SQL dialect that will be used to parse `schema_type`, if needed. +358 +359 Returns: +360 The resulting expression type. 
+361 """ +362 if schema_type not in self._type_mapping_cache: +363 dialect = dialect or self.dialect +364 +365 try: +366 expression = exp.DataType.build(schema_type, dialect=dialect) +367 self._type_mapping_cache[schema_type] = expression +368 except AttributeError: +369 in_dialect = f" in dialect {dialect}" if dialect else "" +370 raise SchemaError(f"Failed to build type '{schema_type}'{in_dialect}.") +371 +372 return self._type_mapping_cache[schema_type] 373 -374 return self._type_mapping_cache[schema_type] -375 -376 -377def ensure_schema(schema: Schema | t.Optional[t.Dict], **kwargs: t.Any) -> Schema: -378 if isinstance(schema, Schema): -379 return schema +374 +375def ensure_schema(schema: Schema | t.Optional[t.Dict], **kwargs: t.Any) -> Schema: +376 if isinstance(schema, Schema): +377 return schema +378 +379 return MappingSchema(schema, **kwargs) 380 -381 return MappingSchema(schema, **kwargs) -382 -383 -384def ensure_column_mapping(mapping: t.Optional[ColumnMapping]) -> t.Dict: -385 if mapping is None: -386 return {} -387 elif isinstance(mapping, dict): -388 return mapping -389 elif isinstance(mapping, str): -390 col_name_type_strs = [x.strip() for x in mapping.split(",")] -391 return { -392 name_type_str.split(":")[0].strip(): name_type_str.split(":")[1].strip() -393 for name_type_str in col_name_type_strs -394 } -395 # Check if mapping looks like a DataFrame StructType -396 elif hasattr(mapping, "simpleString"): -397 return {struct_field.name: struct_field.dataType.simpleString() for struct_field in mapping} -398 elif isinstance(mapping, list): -399 return {x.strip(): None for x in mapping} +381 +382def ensure_column_mapping(mapping: t.Optional[ColumnMapping]) -> t.Dict: +383 if mapping is None: +384 return {} +385 elif isinstance(mapping, dict): +386 return mapping +387 elif isinstance(mapping, str): +388 col_name_type_strs = [x.strip() for x in mapping.split(",")] +389 return { +390 name_type_str.split(":")[0].strip(): name_type_str.split(":")[1].strip() +391 for name_type_str in col_name_type_strs +392 } +393 # Check if mapping looks like a DataFrame StructType +394 elif hasattr(mapping, "simpleString"): +395 return {struct_field.name: struct_field.dataType.simpleString() for struct_field in mapping} +396 elif isinstance(mapping, list): +397 return {x.strip(): None for x in mapping} +398 +399 raise ValueError(f"Invalid mapping provided: {type(mapping)}") 400 -401 raise ValueError(f"Invalid mapping provided: {type(mapping)}") -402 -403 -404def flatten_schema( -405 schema: t.Dict, depth: int, keys: t.Optional[t.List[str]] = None -406) -> t.List[t.List[str]]: -407 tables = [] -408 keys = keys or [] -409 -410 for k, v in schema.items(): -411 if depth >= 2: -412 tables.extend(flatten_schema(v, depth - 1, keys + [k])) -413 elif depth == 1: -414 tables.append(keys + [k]) +401 +402def flatten_schema( +403 schema: t.Dict, depth: int, keys: t.Optional[t.List[str]] = None +404) -> t.List[t.List[str]]: +405 tables = [] +406 keys = keys or [] +407 +408 for k, v in schema.items(): +409 if depth >= 2: +410 tables.extend(flatten_schema(v, depth - 1, keys + [k])) +411 elif depth == 1: +412 tables.append(keys + [k]) +413 +414 return tables 415 -416 return tables -417 -418 -419def nested_get( -420 d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True -421) -> t.Optional[t.Any]: -422 """ -423 Get a value for a nested dictionary. -424 -425 Args: -426 d: the dictionary to search. -427 *path: tuples of (name, key), where: -428 `key` is the key in the dictionary to get. 
-429 `name` is a string to use in the error if `key` isn't found. -430 -431 Returns: -432 The value or None if it doesn't exist. -433 """ -434 for name, key in path: -435 d = d.get(key) # type: ignore -436 if d is None: -437 if raise_on_missing: -438 name = "table" if name == "this" else name -439 raise ValueError(f"Unknown {name}: {key}") -440 return None +416 +417def nested_get( +418 d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True +419) -> t.Optional[t.Any]: +420 """ +421 Get a value for a nested dictionary. +422 +423 Args: +424 d: the dictionary to search. +425 *path: tuples of (name, key), where: +426 `key` is the key in the dictionary to get. +427 `name` is a string to use in the error if `key` isn't found. +428 +429 Returns: +430 The value or None if it doesn't exist. +431 """ +432 for name, key in path: +433 d = d.get(key) # type: ignore +434 if d is None: +435 if raise_on_missing: +436 name = "table" if name == "this" else name +437 raise ValueError(f"Unknown {name}: {key}") +438 return None +439 +440 return d 441 -442 return d -443 -444 -445def nested_set(d: t.Dict, keys: t.Sequence[str], value: t.Any) -> t.Dict: -446 """ -447 In-place set a value for a nested dictionary -448 -449 Example: -450 >>> nested_set({}, ["top_key", "second_key"], "value") -451 {'top_key': {'second_key': 'value'}} -452 -453 >>> nested_set({"top_key": {"third_key": "third_value"}}, ["top_key", "second_key"], "value") -454 {'top_key': {'third_key': 'third_value', 'second_key': 'value'}} -455 -456 Args: -457 d: dictionary to update. -458 keys: the keys that makeup the path to `value`. -459 value: the value to set in the dictionary for the given key path. -460 -461 Returns: -462 The (possibly) updated dictionary. -463 """ -464 if not keys: -465 return d -466 -467 if len(keys) == 1: -468 d[keys[0]] = value -469 return d -470 -471 subd = d -472 for key in keys[:-1]: -473 if key not in subd: -474 subd = subd.setdefault(key, {}) -475 else: -476 subd = subd[key] -477 -478 subd[keys[-1]] = value -479 return d +442 +443def nested_set(d: t.Dict, keys: t.Sequence[str], value: t.Any) -> t.Dict: +444 """ +445 In-place set a value for a nested dictionary +446 +447 Example: +448 >>> nested_set({}, ["top_key", "second_key"], "value") +449 {'top_key': {'second_key': 'value'}} +450 +451 >>> nested_set({"top_key": {"third_key": "third_value"}}, ["top_key", "second_key"], "value") +452 {'top_key': {'third_key': 'third_value', 'second_key': 'value'}} +453 +454 Args: +455 d: dictionary to update. +456 keys: the keys that makeup the path to `value`. +457 value: the value to set in the dictionary for the given key path. +458 +459 Returns: +460 The (possibly) updated dictionary. +461 """ +462 if not keys: +463 return d +464 +465 if len(keys) == 1: +466 d[keys[0]] = value +467 return d +468 +469 subd = d +470 for key in keys[:-1]: +471 if key not in subd: +472 subd = subd.setdefault(key, {}) +473 else: +474 subd = subd[key] +475 +476 subd[keys[-1]] = value +477 return d
      @@ -1223,93 +1221,91 @@ For example, a generic mapping type might be defined as::

      286 elif isinstance(column_type, str): 287 return self._to_data_type(column_type.upper(), dialect=dialect) 288 -289 raise SchemaError(f"Unknown column type '{column_type}'") +289 return exp.DataType.build("unknown") 290 -291 return exp.DataType.build("unknown") -292 -293 def _normalize(self, schema: t.Dict) -> t.Dict: -294 """ -295 Converts all identifiers in the schema into lowercase, unless they're quoted. -296 -297 Args: -298 schema: the schema to normalize. -299 -300 Returns: -301 The normalized schema mapping. -302 """ -303 flattened_schema = flatten_schema(schema, depth=dict_depth(schema) - 1) -304 -305 normalized_mapping: t.Dict = {} -306 for keys in flattened_schema: -307 columns = nested_get(schema, *zip(keys, keys)) -308 assert columns is not None -309 -310 normalized_keys = [self._normalize_name(key, dialect=self.dialect) for key in keys] -311 for column_name, column_type in columns.items(): -312 nested_set( -313 normalized_mapping, -314 normalized_keys + [self._normalize_name(column_name, dialect=self.dialect)], -315 column_type, -316 ) +291 def _normalize(self, schema: t.Dict) -> t.Dict: +292 """ +293 Converts all identifiers in the schema into lowercase, unless they're quoted. +294 +295 Args: +296 schema: the schema to normalize. +297 +298 Returns: +299 The normalized schema mapping. +300 """ +301 flattened_schema = flatten_schema(schema, depth=dict_depth(schema) - 1) +302 +303 normalized_mapping: t.Dict = {} +304 for keys in flattened_schema: +305 columns = nested_get(schema, *zip(keys, keys)) +306 assert columns is not None +307 +308 normalized_keys = [self._normalize_name(key, dialect=self.dialect) for key in keys] +309 for column_name, column_type in columns.items(): +310 nested_set( +311 normalized_mapping, +312 normalized_keys + [self._normalize_name(column_name, dialect=self.dialect)], +313 column_type, +314 ) +315 +316 return normalized_mapping 317 -318 return normalized_mapping -319 -320 def _normalize_table(self, table: exp.Table, dialect: DialectType = None) -> exp.Table: -321 normalized_table = table.copy() -322 -323 for arg in TABLE_ARGS: -324 value = normalized_table.args.get(arg) -325 if isinstance(value, (str, exp.Identifier)): -326 normalized_table.set( -327 arg, exp.to_identifier(self._normalize_name(value, dialect=dialect)) -328 ) +318 def _normalize_table(self, table: exp.Table, dialect: DialectType = None) -> exp.Table: +319 normalized_table = table.copy() +320 +321 for arg in TABLE_ARGS: +322 value = normalized_table.args.get(arg) +323 if isinstance(value, (str, exp.Identifier)): +324 normalized_table.set( +325 arg, exp.to_identifier(self._normalize_name(value, dialect=dialect)) +326 ) +327 +328 return normalized_table 329 -330 return normalized_table -331 -332 def _normalize_name(self, name: str | exp.Identifier, dialect: DialectType = None) -> str: -333 dialect = dialect or self.dialect -334 -335 try: -336 identifier = sqlglot.maybe_parse(name, dialect=dialect, into=exp.Identifier) -337 except ParseError: -338 return name if isinstance(name, str) else name.name +330 def _normalize_name(self, name: str | exp.Identifier, dialect: DialectType = None) -> str: +331 dialect = dialect or self.dialect +332 +333 try: +334 identifier = sqlglot.maybe_parse(name, dialect=dialect, into=exp.Identifier) +335 except ParseError: +336 return name if isinstance(name, str) else name.name +337 +338 name = identifier.name 339 -340 name = identifier.name -341 -342 if not self.normalize or identifier.quoted: -343 return name +340 if not self.normalize or 
identifier.quoted: +341 return name +342 +343 return name.upper() if dialect in RESOLVES_IDENTIFIERS_AS_UPPERCASE else name.lower() 344 -345 return name.upper() if dialect in RESOLVES_IDENTIFIERS_AS_UPPERCASE else name.lower() -346 -347 def _depth(self) -> int: -348 # The columns themselves are a mapping, but we don't want to include those -349 return super()._depth() - 1 -350 -351 def _ensure_table(self, table: exp.Table | str, dialect: DialectType = None) -> exp.Table: -352 return exp.maybe_parse(table, into=exp.Table, dialect=dialect or self.dialect) -353 -354 def _to_data_type(self, schema_type: str, dialect: DialectType = None) -> exp.DataType: -355 """ -356 Convert a type represented as a string to the corresponding `sqlglot.exp.DataType` object. -357 -358 Args: -359 schema_type: the type we want to convert. -360 dialect: the SQL dialect that will be used to parse `schema_type`, if needed. -361 -362 Returns: -363 The resulting expression type. -364 """ -365 if schema_type not in self._type_mapping_cache: -366 dialect = dialect or self.dialect -367 -368 try: -369 expression = exp.DataType.build(schema_type, dialect=dialect) -370 self._type_mapping_cache[schema_type] = expression -371 except AttributeError: -372 in_dialect = f" in dialect {dialect}" if dialect else "" -373 raise SchemaError(f"Failed to build type '{schema_type}'{in_dialect}.") -374 -375 return self._type_mapping_cache[schema_type] +345 def _depth(self) -> int: +346 # The columns themselves are a mapping, but we don't want to include those +347 return super()._depth() - 1 +348 +349 def _ensure_table(self, table: exp.Table | str, dialect: DialectType = None) -> exp.Table: +350 return exp.maybe_parse(table, into=exp.Table, dialect=dialect or self.dialect) +351 +352 def _to_data_type(self, schema_type: str, dialect: DialectType = None) -> exp.DataType: +353 """ +354 Convert a type represented as a string to the corresponding `sqlglot.exp.DataType` object. +355 +356 Args: +357 schema_type: the type we want to convert. +358 dialect: the SQL dialect that will be used to parse `schema_type`, if needed. +359 +360 Returns: +361 The resulting expression type. +362 """ +363 if schema_type not in self._type_mapping_cache: +364 dialect = dialect or self.dialect +365 +366 try: +367 expression = exp.DataType.build(schema_type, dialect=dialect) +368 self._type_mapping_cache[schema_type] = expression +369 except AttributeError: +370 in_dialect = f" in dialect {dialect}" if dialect else "" +371 raise SchemaError(f"Failed to build type '{schema_type}'{in_dialect}.") +372 +373 return self._type_mapping_cache[schema_type]
      @@ -1560,9 +1556,7 @@ are assumed to be visible. The nesting should mirror that of the schema:
286            elif isinstance(column_type, str):
287                return self._to_data_type(column_type.upper(), dialect=dialect)
288
-289            raise SchemaError(f"Unknown column type '{column_type}'")
-290
-291            return exp.DataType.build("unknown")
+289            return exp.DataType.build("unknown")
      @@ -1610,11 +1604,11 @@ are assumed to be visible. The nesting should mirror that of the schema:
      -
      378def ensure_schema(schema: Schema | t.Optional[t.Dict], **kwargs: t.Any) -> Schema:
      -379    if isinstance(schema, Schema):
      -380        return schema
      -381
      -382    return MappingSchema(schema, **kwargs)
      +            
      376def ensure_schema(schema: Schema | t.Optional[t.Dict], **kwargs: t.Any) -> Schema:
      +377    if isinstance(schema, Schema):
      +378        return schema
      +379
      +380    return MappingSchema(schema, **kwargs)
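A minimal sketch of ensure_schema(), assuming the public MappingSchema constructor accepts the nested dict form used throughout this module:

    # Sketch: dicts get wrapped in a MappingSchema, existing Schema objects pass through.
    from sqlglot.schema import MappingSchema, ensure_schema

    schema = ensure_schema({"db": {"t": {"id": "INT", "name": "TEXT"}}})
    assert isinstance(schema, MappingSchema)
    assert ensure_schema(schema) is schema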
       
      @@ -1632,24 +1626,24 @@ are assumed to be visible. The nesting should mirror that of the schema:
      -
      385def ensure_column_mapping(mapping: t.Optional[ColumnMapping]) -> t.Dict:
      -386    if mapping is None:
      -387        return {}
      -388    elif isinstance(mapping, dict):
      -389        return mapping
      -390    elif isinstance(mapping, str):
      -391        col_name_type_strs = [x.strip() for x in mapping.split(",")]
      -392        return {
      -393            name_type_str.split(":")[0].strip(): name_type_str.split(":")[1].strip()
      -394            for name_type_str in col_name_type_strs
      -395        }
      -396    # Check if mapping looks like a DataFrame StructType
      -397    elif hasattr(mapping, "simpleString"):
      -398        return {struct_field.name: struct_field.dataType.simpleString() for struct_field in mapping}
      -399    elif isinstance(mapping, list):
      -400        return {x.strip(): None for x in mapping}
      -401
      -402    raise ValueError(f"Invalid mapping provided: {type(mapping)}")
      +            
      383def ensure_column_mapping(mapping: t.Optional[ColumnMapping]) -> t.Dict:
      +384    if mapping is None:
      +385        return {}
      +386    elif isinstance(mapping, dict):
      +387        return mapping
      +388    elif isinstance(mapping, str):
      +389        col_name_type_strs = [x.strip() for x in mapping.split(",")]
      +390        return {
      +391            name_type_str.split(":")[0].strip(): name_type_str.split(":")[1].strip()
      +392            for name_type_str in col_name_type_strs
      +393        }
      +394    # Check if mapping looks like a DataFrame StructType
      +395    elif hasattr(mapping, "simpleString"):
      +396        return {struct_field.name: struct_field.dataType.simpleString() for struct_field in mapping}
      +397    elif isinstance(mapping, list):
      +398        return {x.strip(): None for x in mapping}
      +399
      +400    raise ValueError(f"Invalid mapping provided: {type(mapping)}")
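As the branches above show, ensure_column_mapping accepts several shapes (None, a dict, a "name: type" string, a Spark-style StructType, or a bare list of column names) and normalizes them into one {column: type} dict. A quick sketch of the string and list forms; note that the string form splits naively on commas, so parametrized types such as DECIMAL(10, 2) do not fit it:

```python
from sqlglot.schema import ensure_column_mapping

# "name: type" pairs separated by commas collapse into a plain dict.
print(ensure_column_mapping("id: INT, name: TEXT"))
# {'id': 'INT', 'name': 'TEXT'}

# A bare list of column names maps every column to None (type unknown).
print(ensure_column_mapping(["id", "name"]))
# {'id': None, 'name': None}
```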
       
      @@ -1667,19 +1661,19 @@ are assumed to be visible. The nesting should mirror that of the schema:
      -
      405def flatten_schema(
      -406    schema: t.Dict, depth: int, keys: t.Optional[t.List[str]] = None
      -407) -> t.List[t.List[str]]:
      -408    tables = []
      -409    keys = keys or []
      -410
      -411    for k, v in schema.items():
      -412        if depth >= 2:
      -413            tables.extend(flatten_schema(v, depth - 1, keys + [k]))
      -414        elif depth == 1:
      -415            tables.append(keys + [k])
      -416
      -417    return tables
      +            
      403def flatten_schema(
      +404    schema: t.Dict, depth: int, keys: t.Optional[t.List[str]] = None
      +405) -> t.List[t.List[str]]:
      +406    tables = []
      +407    keys = keys or []
      +408
      +409    for k, v in schema.items():
      +410        if depth >= 2:
      +411            tables.extend(flatten_schema(v, depth - 1, keys + [k]))
      +412        elif depth == 1:
      +413            tables.append(keys + [k])
      +414
      +415    return tables
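flatten_schema above walks the nested mapping down to (but not including) the column level and returns the key paths, with depth counting the levels above the columns. A short sketch with illustrative data:

```python
from sqlglot.schema import flatten_schema

nested = {
    "db1": {"orders": {"id": "INT"}, "users": {"id": "INT"}},
    "db2": {"events": {"ts": "TIMESTAMP"}},
}

# depth=2 because there are two levels (db -> table) above the column mapping.
print(flatten_schema(nested, depth=2))
# [['db1', 'orders'], ['db1', 'users'], ['db2', 'events']]
```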
       
      @@ -1697,30 +1691,30 @@ are assumed to be visible. The nesting should mirror that of the schema:
      -
      420def nested_get(
      -421    d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True
      -422) -> t.Optional[t.Any]:
      -423    """
      -424    Get a value for a nested dictionary.
      -425
      -426    Args:
      -427        d: the dictionary to search.
      -428        *path: tuples of (name, key), where:
      -429            `key` is the key in the dictionary to get.
      -430            `name` is a string to use in the error if `key` isn't found.
      -431
      -432    Returns:
      -433        The value or None if it doesn't exist.
      -434    """
      -435    for name, key in path:
      -436        d = d.get(key)  # type: ignore
      -437        if d is None:
      -438            if raise_on_missing:
      -439                name = "table" if name == "this" else name
      -440                raise ValueError(f"Unknown {name}: {key}")
      -441            return None
      -442
      -443    return d
      +            
      418def nested_get(
      +419    d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True
      +420) -> t.Optional[t.Any]:
      +421    """
      +422    Get a value for a nested dictionary.
      +423
      +424    Args:
      +425        d: the dictionary to search.
      +426        *path: tuples of (name, key), where:
      +427            `key` is the key in the dictionary to get.
      +428            `name` is a string to use in the error if `key` isn't found.
      +429
      +430    Returns:
      +431        The value or None if it doesn't exist.
      +432    """
      +433    for name, key in path:
      +434        d = d.get(key)  # type: ignore
      +435        if d is None:
      +436            if raise_on_missing:
      +437                name = "table" if name == "this" else name
      +438                raise ValueError(f"Unknown {name}: {key}")
      +439            return None
      +440
      +441    return d
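nested_get above walks a dict along (name, key) pairs; the name half exists only so that a missing key produces a readable error. A short sketch:

```python
from sqlglot.schema import nested_get

schema = {"db1": {"orders": {"id": "INT"}}}

# Each path element is (label-for-errors, key-to-look-up).
print(nested_get(schema, ("database", "db1"), ("table", "orders")))
# {'id': 'INT'}

# A missing key either raises ValueError("Unknown table: users") or, with
# raise_on_missing=False, returns None.
print(nested_get(schema, ("database", "db1"), ("table", "users"), raise_on_missing=False))
# None
```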
       
      @@ -1755,41 +1749,41 @@ are assumed to be visible. The nesting should mirror that of the schema:
      -
      446def nested_set(d: t.Dict, keys: t.Sequence[str], value: t.Any) -> t.Dict:
      -447    """
      -448    In-place set a value for a nested dictionary
      -449
      -450    Example:
      -451        >>> nested_set({}, ["top_key", "second_key"], "value")
      -452        {'top_key': {'second_key': 'value'}}
      -453
      -454        >>> nested_set({"top_key": {"third_key": "third_value"}}, ["top_key", "second_key"], "value")
      -455        {'top_key': {'third_key': 'third_value', 'second_key': 'value'}}
      -456
      -457    Args:
      -458        d: dictionary to update.
      -459        keys: the keys that makeup the path to `value`.
      -460        value: the value to set in the dictionary for the given key path.
      -461
      -462    Returns:
      -463        The (possibly) updated dictionary.
      -464    """
      -465    if not keys:
      -466        return d
      -467
      -468    if len(keys) == 1:
      -469        d[keys[0]] = value
      -470        return d
      -471
      -472    subd = d
      -473    for key in keys[:-1]:
      -474        if key not in subd:
      -475            subd = subd.setdefault(key, {})
      -476        else:
      -477            subd = subd[key]
      -478
      -479    subd[keys[-1]] = value
      -480    return d
      +            
      444def nested_set(d: t.Dict, keys: t.Sequence[str], value: t.Any) -> t.Dict:
      +445    """
      +446    In-place set a value for a nested dictionary
      +447
      +448    Example:
      +449        >>> nested_set({}, ["top_key", "second_key"], "value")
      +450        {'top_key': {'second_key': 'value'}}
      +451
      +452        >>> nested_set({"top_key": {"third_key": "third_value"}}, ["top_key", "second_key"], "value")
      +453        {'top_key': {'third_key': 'third_value', 'second_key': 'value'}}
      +454
      +455    Args:
      +456        d: dictionary to update.
      +457        keys: the keys that makeup the path to `value`.
      +458        value: the value to set in the dictionary for the given key path.
      +459
      +460    Returns:
      +461        The (possibly) updated dictionary.
      +462    """
      +463    if not keys:
      +464        return d
      +465
      +466    if len(keys) == 1:
      +467        d[keys[0]] = value
      +468        return d
      +469
      +470    subd = d
      +471    for key in keys[:-1]:
      +472        if key not in subd:
      +473            subd = subd.setdefault(key, {})
      +474        else:
      +475            subd = subd[key]
      +476
      +477    subd[keys[-1]] = value
      +478    return d
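The doctests above already cover nested_set on its own; as a follow-up, here is a sketch of nested_set and nested_get used together, roughly the way the schema helpers combine to build and read a nested mapping (the pairing itself is an illustration, not lifted from the patch):

```python
from sqlglot.schema import nested_get, nested_set

mapping: dict = {}
nested_set(mapping, ["db1", "orders", "id"], "INT")
nested_set(mapping, ["db1", "orders", "total"], "DECIMAL")

print(mapping)
# {'db1': {'orders': {'id': 'INT', 'total': 'DECIMAL'}}}
print(nested_get(mapping, ("database", "db1"), ("table", "orders")))
# {'id': 'INT', 'total': 'DECIMAL'}
```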
       
      diff --git a/docs/sqlglot/serde.html b/docs/sqlglot/serde.html index fa52092..f971408 100644 --- a/docs/sqlglot/serde.html +++ b/docs/sqlglot/serde.html @@ -66,7 +66,7 @@
      5from sqlglot import expressions as exp 6 7if t.TYPE_CHECKING: - 8 JSON = t.Union[dict, list, str, float, int, bool] + 8 JSON = t.Union[dict, list, str, float, int, bool, None] 9 Node = t.Union[t.List["Node"], exp.DataType.Type, exp.Expression, JSON] 10 11 @@ -85,12 +85,12 @@ 24 klass = node.__class__.__qualname__ 25 if node.__class__.__module__ != exp.__name__: 26 klass = f"{node.__module__}.{klass}" -27 obj = { +27 obj: t.Dict = { 28 "class": klass, 29 "args": {k: dump(v) for k, v in node.args.items() if v is not None and v != []}, 30 } 31 if node.type: -32 obj["type"] = node.type.sql() +32 obj["type"] = dump(node.type) 33 if node.comments: 34 obj["comments"] = node.comments 35 if node._meta is not None: @@ -121,7 +121,7 @@ 60 klass = getattr(module, class_name) 61 62 expression = klass(**{k: load(v) for k, v in obj["args"].items()}) -63 expression.type = obj.get("type") +63 expression.type = t.cast(exp.DataType, load(obj.get("type"))) 64 expression.comments = obj.get("comments") 65 expression._meta = obj.get("meta") 66 @@ -136,7 +136,7 @@
      def - dump( node: Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool]) -> Union[dict, list, str, float, int, bool]: + dump( node: Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool, NoneType]) -> Union[dict, list, str, float, int, bool, NoneType]: @@ -157,12 +157,12 @@ 25 klass = node.__class__.__qualname__ 26 if node.__class__.__module__ != exp.__name__: 27 klass = f"{node.__module__}.{klass}" -28 obj = { +28 obj: t.Dict = { 29 "class": klass, 30 "args": {k: dump(v) for k, v in node.args.items() if v is not None and v != []}, 31 } 32 if node.type: -33 obj["type"] = node.type.sql() +33 obj["type"] = dump(node.type) 34 if node.comments: 35 obj["comments"] = node.comments 36 if node._meta is not None: @@ -183,7 +183,7 @@
      def - load( obj: Union[dict, list, str, float, int, bool]) -> Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool]: + load( obj: Union[dict, list, str, float, int, bool, NoneType]) -> Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool, NoneType]: @@ -210,7 +210,7 @@ 61 klass = getattr(module, class_name) 62 63 expression = klass(**{k: load(v) for k, v in obj["args"].items()}) -64 expression.type = obj.get("type") +64 expression.type = t.cast(exp.DataType, load(obj.get("type"))) 65 expression.comments = obj.get("comments") 66 expression._meta = obj.get("meta") 67 diff --git a/docs/sqlglot/tokens.html b/docs/sqlglot/tokens.html index e11806a..0a27ffe 100644 --- a/docs/sqlglot/tokens.html +++ b/docs/sqlglot/tokens.html @@ -426,6 +426,9 @@
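The serde changes above widen JSON to include None and round-trip an expression's annotated type through dump/load as a nested DataType object rather than a SQL string. A minimal round-trip sketch, assuming sqlglot's public parse_one, annotate_types and serde helpers behave as in the patched sources:

```python
import sqlglot
from sqlglot import expressions as exp
from sqlglot.optimizer.annotate_types import annotate_types
from sqlglot.serde import dump, load

node = annotate_types(sqlglot.parse_one("SELECT CAST(a AS INT) AS a FROM t"))

payload = dump(node)   # plain dicts/lists/strings (and now None), json.dumps-safe
restored = load(payload)

print(restored.sql())  # SELECT CAST(a AS INT) AS a FROM t
# With this patch the annotated type comes back as an exp.DataType node.
cast = restored.find(exp.Cast)
print(type(cast.type).__name__, cast.type.sql())  # DataType INT
```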
    • INET
    • + ENUM
    • ALIAS
      @@ -927,6 +930,9 @@
    • tokenize
    • + peek
    @@ -1099,990 +1105,1006 @@ 144 VARIANT = auto() 145 OBJECT = auto() 146 INET = auto() - 147 - 148 # keywords - 149 ALIAS = auto() - 150 ALTER = auto() - 151 ALWAYS = auto() - 152 ALL = auto() - 153 ANTI = auto() - 154 ANY = auto() - 155 APPLY = auto() - 156 ARRAY = auto() - 157 ASC = auto() - 158 ASOF = auto() - 159 AUTO_INCREMENT = auto() - 160 BEGIN = auto() - 161 BETWEEN = auto() - 162 CACHE = auto() - 163 CASE = auto() - 164 CHARACTER_SET = auto() - 165 COLLATE = auto() - 166 COMMAND = auto() - 167 COMMENT = auto() - 168 COMMIT = auto() - 169 CONSTRAINT = auto() - 170 CREATE = auto() - 171 CROSS = auto() - 172 CUBE = auto() - 173 CURRENT_DATE = auto() - 174 CURRENT_DATETIME = auto() - 175 CURRENT_TIME = auto() - 176 CURRENT_TIMESTAMP = auto() - 177 CURRENT_USER = auto() - 178 DEFAULT = auto() - 179 DELETE = auto() - 180 DESC = auto() - 181 DESCRIBE = auto() - 182 DICTIONARY = auto() - 183 DISTINCT = auto() - 184 DIV = auto() - 185 DROP = auto() - 186 ELSE = auto() - 187 END = auto() - 188 ESCAPE = auto() - 189 EXCEPT = auto() - 190 EXECUTE = auto() - 191 EXISTS = auto() - 192 FALSE = auto() - 193 FETCH = auto() - 194 FILTER = auto() - 195 FINAL = auto() - 196 FIRST = auto() - 197 FOR = auto() - 198 FOREIGN_KEY = auto() - 199 FORMAT = auto() - 200 FROM = auto() - 201 FULL = auto() - 202 FUNCTION = auto() - 203 GLOB = auto() - 204 GLOBAL = auto() - 205 GROUP_BY = auto() - 206 GROUPING_SETS = auto() - 207 HAVING = auto() - 208 HINT = auto() - 209 IF = auto() - 210 ILIKE = auto() - 211 ILIKE_ANY = auto() - 212 IN = auto() - 213 INDEX = auto() - 214 INNER = auto() - 215 INSERT = auto() - 216 INTERSECT = auto() - 217 INTERVAL = auto() - 218 INTO = auto() - 219 INTRODUCER = auto() - 220 IRLIKE = auto() - 221 IS = auto() - 222 ISNULL = auto() - 223 JOIN = auto() - 224 JOIN_MARKER = auto() - 225 KEEP = auto() - 226 LANGUAGE = auto() - 227 LATERAL = auto() - 228 LEFT = auto() - 229 LIKE = auto() - 230 LIKE_ANY = auto() - 231 LIMIT = auto() - 232 LOAD = auto() - 233 LOCK = auto() - 234 MAP = auto() - 235 MATCH_RECOGNIZE = auto() - 236 MERGE = auto() - 237 MOD = auto() - 238 NATURAL = auto() - 239 NEXT = auto() - 240 NEXT_VALUE_FOR = auto() - 241 NOTNULL = auto() - 242 NULL = auto() - 243 OFFSET = auto() - 244 ON = auto() - 245 ORDER_BY = auto() - 246 ORDERED = auto() - 247 ORDINALITY = auto() - 248 OUTER = auto() - 249 OVER = auto() - 250 OVERLAPS = auto() - 251 OVERWRITE = auto() - 252 PARTITION = auto() - 253 PARTITION_BY = auto() - 254 PERCENT = auto() - 255 PIVOT = auto() - 256 PLACEHOLDER = auto() - 257 PRAGMA = auto() - 258 PRIMARY_KEY = auto() - 259 PROCEDURE = auto() - 260 PROPERTIES = auto() - 261 PSEUDO_TYPE = auto() - 262 QUALIFY = auto() - 263 QUOTE = auto() - 264 RANGE = auto() - 265 RECURSIVE = auto() - 266 REPLACE = auto() - 267 RETURNING = auto() - 268 REFERENCES = auto() - 269 RIGHT = auto() - 270 RLIKE = auto() - 271 ROLLBACK = auto() - 272 ROLLUP = auto() - 273 ROW = auto() - 274 ROWS = auto() - 275 SELECT = auto() - 276 SEMI = auto() - 277 SEPARATOR = auto() - 278 SERDE_PROPERTIES = auto() - 279 SET = auto() - 280 SETTINGS = auto() - 281 SHOW = auto() - 282 SIMILAR_TO = auto() - 283 SOME = auto() - 284 STRUCT = auto() - 285 TABLE_SAMPLE = auto() - 286 TEMPORARY = auto() - 287 TOP = auto() - 288 THEN = auto() - 289 TRUE = auto() - 290 UNCACHE = auto() - 291 UNION = auto() - 292 UNNEST = auto() - 293 UNPIVOT = auto() - 294 UPDATE = auto() - 295 USE = auto() - 296 USING = auto() - 297 VALUES = auto() - 298 VIEW = auto() - 299 VOLATILE = auto() - 300 WHEN = auto() - 301 WHERE 
= auto() - 302 WINDOW = auto() - 303 WITH = auto() - 304 UNIQUE = auto() - 305 + 147 ENUM = auto() + 148 + 149 # keywords + 150 ALIAS = auto() + 151 ALTER = auto() + 152 ALWAYS = auto() + 153 ALL = auto() + 154 ANTI = auto() + 155 ANY = auto() + 156 APPLY = auto() + 157 ARRAY = auto() + 158 ASC = auto() + 159 ASOF = auto() + 160 AUTO_INCREMENT = auto() + 161 BEGIN = auto() + 162 BETWEEN = auto() + 163 CACHE = auto() + 164 CASE = auto() + 165 CHARACTER_SET = auto() + 166 COLLATE = auto() + 167 COMMAND = auto() + 168 COMMENT = auto() + 169 COMMIT = auto() + 170 CONSTRAINT = auto() + 171 CREATE = auto() + 172 CROSS = auto() + 173 CUBE = auto() + 174 CURRENT_DATE = auto() + 175 CURRENT_DATETIME = auto() + 176 CURRENT_TIME = auto() + 177 CURRENT_TIMESTAMP = auto() + 178 CURRENT_USER = auto() + 179 DEFAULT = auto() + 180 DELETE = auto() + 181 DESC = auto() + 182 DESCRIBE = auto() + 183 DICTIONARY = auto() + 184 DISTINCT = auto() + 185 DIV = auto() + 186 DROP = auto() + 187 ELSE = auto() + 188 END = auto() + 189 ESCAPE = auto() + 190 EXCEPT = auto() + 191 EXECUTE = auto() + 192 EXISTS = auto() + 193 FALSE = auto() + 194 FETCH = auto() + 195 FILTER = auto() + 196 FINAL = auto() + 197 FIRST = auto() + 198 FOR = auto() + 199 FOREIGN_KEY = auto() + 200 FORMAT = auto() + 201 FROM = auto() + 202 FULL = auto() + 203 FUNCTION = auto() + 204 GLOB = auto() + 205 GLOBAL = auto() + 206 GROUP_BY = auto() + 207 GROUPING_SETS = auto() + 208 HAVING = auto() + 209 HINT = auto() + 210 IF = auto() + 211 ILIKE = auto() + 212 ILIKE_ANY = auto() + 213 IN = auto() + 214 INDEX = auto() + 215 INNER = auto() + 216 INSERT = auto() + 217 INTERSECT = auto() + 218 INTERVAL = auto() + 219 INTO = auto() + 220 INTRODUCER = auto() + 221 IRLIKE = auto() + 222 IS = auto() + 223 ISNULL = auto() + 224 JOIN = auto() + 225 JOIN_MARKER = auto() + 226 KEEP = auto() + 227 LANGUAGE = auto() + 228 LATERAL = auto() + 229 LEFT = auto() + 230 LIKE = auto() + 231 LIKE_ANY = auto() + 232 LIMIT = auto() + 233 LOAD = auto() + 234 LOCK = auto() + 235 MAP = auto() + 236 MATCH_RECOGNIZE = auto() + 237 MERGE = auto() + 238 MOD = auto() + 239 NATURAL = auto() + 240 NEXT = auto() + 241 NEXT_VALUE_FOR = auto() + 242 NOTNULL = auto() + 243 NULL = auto() + 244 OFFSET = auto() + 245 ON = auto() + 246 ORDER_BY = auto() + 247 ORDERED = auto() + 248 ORDINALITY = auto() + 249 OUTER = auto() + 250 OVER = auto() + 251 OVERLAPS = auto() + 252 OVERWRITE = auto() + 253 PARTITION = auto() + 254 PARTITION_BY = auto() + 255 PERCENT = auto() + 256 PIVOT = auto() + 257 PLACEHOLDER = auto() + 258 PRAGMA = auto() + 259 PRIMARY_KEY = auto() + 260 PROCEDURE = auto() + 261 PROPERTIES = auto() + 262 PSEUDO_TYPE = auto() + 263 QUALIFY = auto() + 264 QUOTE = auto() + 265 RANGE = auto() + 266 RECURSIVE = auto() + 267 REPLACE = auto() + 268 RETURNING = auto() + 269 REFERENCES = auto() + 270 RIGHT = auto() + 271 RLIKE = auto() + 272 ROLLBACK = auto() + 273 ROLLUP = auto() + 274 ROW = auto() + 275 ROWS = auto() + 276 SELECT = auto() + 277 SEMI = auto() + 278 SEPARATOR = auto() + 279 SERDE_PROPERTIES = auto() + 280 SET = auto() + 281 SETTINGS = auto() + 282 SHOW = auto() + 283 SIMILAR_TO = auto() + 284 SOME = auto() + 285 STRUCT = auto() + 286 TABLE_SAMPLE = auto() + 287 TEMPORARY = auto() + 288 TOP = auto() + 289 THEN = auto() + 290 TRUE = auto() + 291 UNCACHE = auto() + 292 UNION = auto() + 293 UNNEST = auto() + 294 UNPIVOT = auto() + 295 UPDATE = auto() + 296 USE = auto() + 297 USING = auto() + 298 VALUES = auto() + 299 VIEW = auto() + 300 VOLATILE = auto() + 301 WHEN = auto() + 
302 WHERE = auto() + 303 WINDOW = auto() + 304 WITH = auto() + 305 UNIQUE = auto() 306 - 307class Token: - 308 __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments") - 309 - 310 @classmethod - 311 def number(cls, number: int) -> Token: - 312 """Returns a NUMBER token with `number` as its text.""" - 313 return cls(TokenType.NUMBER, str(number)) - 314 - 315 @classmethod - 316 def string(cls, string: str) -> Token: - 317 """Returns a STRING token with `string` as its text.""" - 318 return cls(TokenType.STRING, string) - 319 - 320 @classmethod - 321 def identifier(cls, identifier: str) -> Token: - 322 """Returns an IDENTIFIER token with `identifier` as its text.""" - 323 return cls(TokenType.IDENTIFIER, identifier) - 324 - 325 @classmethod - 326 def var(cls, var: str) -> Token: - 327 """Returns an VAR token with `var` as its text.""" - 328 return cls(TokenType.VAR, var) - 329 - 330 def __init__( - 331 self, - 332 token_type: TokenType, - 333 text: str, - 334 line: int = 1, - 335 col: int = 1, - 336 start: int = 0, - 337 end: int = 0, - 338 comments: t.List[str] = [], - 339 ) -> None: - 340 """Token initializer. - 341 - 342 Args: - 343 token_type: The TokenType Enum. - 344 text: The text of the token. - 345 line: The line that the token ends on. - 346 col: The column that the token ends on. - 347 start: The start index of the token. - 348 end: The ending index of the token. - 349 """ - 350 self.token_type = token_type - 351 self.text = text - 352 self.line = line - 353 self.col = col - 354 self.start = start - 355 self.end = end - 356 self.comments = comments - 357 - 358 def __repr__(self) -> str: - 359 attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__) - 360 return f"<Token {attributes}>" - 361 - 362 - 363class _Tokenizer(type): - 364 def __new__(cls, clsname, bases, attrs): - 365 klass = super().__new__(cls, clsname, bases, attrs) - 366 - 367 def _convert_quotes(arr: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]: - 368 return dict( - 369 (item, item) if isinstance(item, str) else (item[0], item[1]) for item in arr - 370 ) - 371 - 372 def _quotes_to_format( - 373 token_type: TokenType, arr: t.List[str | t.Tuple[str, str]] - 374 ) -> t.Dict[str, t.Tuple[str, TokenType]]: - 375 return {k: (v, token_type) for k, v in _convert_quotes(arr).items()} - 376 - 377 klass._QUOTES = _convert_quotes(klass.QUOTES) - 378 klass._IDENTIFIERS = _convert_quotes(klass.IDENTIFIERS) - 379 - 380 klass._FORMAT_STRINGS = { - 381 **{ - 382 p + s: (e, TokenType.NATIONAL_STRING) - 383 for s, e in klass._QUOTES.items() - 384 for p in ("n", "N") - 385 }, - 386 **_quotes_to_format(TokenType.BIT_STRING, klass.BIT_STRINGS), - 387 **_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS), - 388 **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS), - 389 **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS), - 390 } - 391 - 392 klass._STRING_ESCAPES = set(klass.STRING_ESCAPES) - 393 klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES) - 394 klass._COMMENTS = dict( - 395 (comment, None) if isinstance(comment, str) else (comment[0], comment[1]) - 396 for comment in klass.COMMENTS - 397 ) - 398 - 399 klass.KEYWORD_TRIE = new_trie( - 400 key.upper() - 401 for key in ( - 402 *klass.KEYWORDS, - 403 *klass._COMMENTS, - 404 *klass._QUOTES, - 405 *klass._FORMAT_STRINGS, - 406 ) - 407 if " " in key or any(single in key for single in klass.SINGLE_TOKENS) - 408 ) - 409 - 410 return klass - 411 - 412 - 413class Tokenizer(metaclass=_Tokenizer): - 414 SINGLE_TOKENS 
= { - 415 "(": TokenType.L_PAREN, - 416 ")": TokenType.R_PAREN, - 417 "[": TokenType.L_BRACKET, - 418 "]": TokenType.R_BRACKET, - 419 "{": TokenType.L_BRACE, - 420 "}": TokenType.R_BRACE, - 421 "&": TokenType.AMP, - 422 "^": TokenType.CARET, - 423 ":": TokenType.COLON, - 424 ",": TokenType.COMMA, - 425 ".": TokenType.DOT, - 426 "-": TokenType.DASH, - 427 "=": TokenType.EQ, - 428 ">": TokenType.GT, - 429 "<": TokenType.LT, - 430 "%": TokenType.MOD, - 431 "!": TokenType.NOT, - 432 "|": TokenType.PIPE, - 433 "+": TokenType.PLUS, - 434 ";": TokenType.SEMICOLON, - 435 "/": TokenType.SLASH, - 436 "\\": TokenType.BACKSLASH, - 437 "*": TokenType.STAR, - 438 "~": TokenType.TILDA, - 439 "?": TokenType.PLACEHOLDER, - 440 "@": TokenType.PARAMETER, - 441 # used for breaking a var like x'y' but nothing else - 442 # the token type doesn't matter - 443 "'": TokenType.QUOTE, - 444 "`": TokenType.IDENTIFIER, - 445 '"': TokenType.IDENTIFIER, - 446 "#": TokenType.HASH, - 447 } - 448 - 449 BIT_STRINGS: t.List[str | t.Tuple[str, str]] = [] - 450 BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = [] - 451 HEX_STRINGS: t.List[str | t.Tuple[str, str]] = [] - 452 RAW_STRINGS: t.List[str | t.Tuple[str, str]] = [] - 453 IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"'] - 454 IDENTIFIER_ESCAPES = ['"'] - 455 QUOTES: t.List[t.Tuple[str, str] | str] = ["'"] - 456 STRING_ESCAPES = ["'"] - 457 VAR_SINGLE_TOKENS: t.Set[str] = set() - 458 - 459 _COMMENTS: t.Dict[str, str] = {} - 460 _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {} - 461 _IDENTIFIERS: t.Dict[str, str] = {} - 462 _IDENTIFIER_ESCAPES: t.Set[str] = set() - 463 _QUOTES: t.Dict[str, str] = {} - 464 _STRING_ESCAPES: t.Set[str] = set() - 465 - 466 KEYWORDS: t.Dict[t.Optional[str], TokenType] = { - 467 **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")}, - 468 **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")}, - 469 "{{+": TokenType.BLOCK_START, - 470 "{{-": TokenType.BLOCK_START, - 471 "+}}": TokenType.BLOCK_END, - 472 "-}}": TokenType.BLOCK_END, - 473 "/*+": TokenType.HINT, - 474 "==": TokenType.EQ, - 475 "::": TokenType.DCOLON, - 476 "||": TokenType.DPIPE, - 477 ">=": TokenType.GTE, - 478 "<=": TokenType.LTE, - 479 "<>": TokenType.NEQ, - 480 "!=": TokenType.NEQ, - 481 "<=>": TokenType.NULLSAFE_EQ, - 482 "->": TokenType.ARROW, - 483 "->>": TokenType.DARROW, - 484 "=>": TokenType.FARROW, - 485 "#>": TokenType.HASH_ARROW, - 486 "#>>": TokenType.DHASH_ARROW, - 487 "<->": TokenType.LR_ARROW, - 488 "&&": TokenType.DAMP, - 489 "ALL": TokenType.ALL, - 490 "ALWAYS": TokenType.ALWAYS, - 491 "AND": TokenType.AND, - 492 "ANTI": TokenType.ANTI, - 493 "ANY": TokenType.ANY, - 494 "ASC": TokenType.ASC, - 495 "AS": TokenType.ALIAS, - 496 "ASOF": TokenType.ASOF, - 497 "AUTOINCREMENT": TokenType.AUTO_INCREMENT, - 498 "AUTO_INCREMENT": TokenType.AUTO_INCREMENT, - 499 "BEGIN": TokenType.BEGIN, - 500 "BETWEEN": TokenType.BETWEEN, - 501 "CACHE": TokenType.CACHE, - 502 "UNCACHE": TokenType.UNCACHE, - 503 "CASE": TokenType.CASE, - 504 "CHARACTER SET": TokenType.CHARACTER_SET, - 505 "COLLATE": TokenType.COLLATE, - 506 "COLUMN": TokenType.COLUMN, - 507 "COMMIT": TokenType.COMMIT, - 508 "CONSTRAINT": TokenType.CONSTRAINT, - 509 "CREATE": TokenType.CREATE, - 510 "CROSS": TokenType.CROSS, - 511 "CUBE": TokenType.CUBE, - 512 "CURRENT_DATE": TokenType.CURRENT_DATE, - 513 "CURRENT_TIME": TokenType.CURRENT_TIME, - 514 "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP, - 515 "CURRENT_USER": TokenType.CURRENT_USER, - 516 "DATABASE": 
TokenType.DATABASE, - 517 "DEFAULT": TokenType.DEFAULT, - 518 "DELETE": TokenType.DELETE, - 519 "DESC": TokenType.DESC, - 520 "DESCRIBE": TokenType.DESCRIBE, - 521 "DISTINCT": TokenType.DISTINCT, - 522 "DIV": TokenType.DIV, - 523 "DROP": TokenType.DROP, - 524 "ELSE": TokenType.ELSE, - 525 "END": TokenType.END, - 526 "ESCAPE": TokenType.ESCAPE, - 527 "EXCEPT": TokenType.EXCEPT, - 528 "EXECUTE": TokenType.EXECUTE, - 529 "EXISTS": TokenType.EXISTS, - 530 "FALSE": TokenType.FALSE, - 531 "FETCH": TokenType.FETCH, - 532 "FILTER": TokenType.FILTER, - 533 "FIRST": TokenType.FIRST, - 534 "FULL": TokenType.FULL, - 535 "FUNCTION": TokenType.FUNCTION, - 536 "FOR": TokenType.FOR, - 537 "FOREIGN KEY": TokenType.FOREIGN_KEY, - 538 "FORMAT": TokenType.FORMAT, - 539 "FROM": TokenType.FROM, - 540 "GEOGRAPHY": TokenType.GEOGRAPHY, - 541 "GEOMETRY": TokenType.GEOMETRY, - 542 "GLOB": TokenType.GLOB, - 543 "GROUP BY": TokenType.GROUP_BY, - 544 "GROUPING SETS": TokenType.GROUPING_SETS, - 545 "HAVING": TokenType.HAVING, - 546 "IF": TokenType.IF, - 547 "ILIKE": TokenType.ILIKE, - 548 "IN": TokenType.IN, - 549 "INDEX": TokenType.INDEX, - 550 "INET": TokenType.INET, - 551 "INNER": TokenType.INNER, - 552 "INSERT": TokenType.INSERT, - 553 "INTERVAL": TokenType.INTERVAL, - 554 "INTERSECT": TokenType.INTERSECT, - 555 "INTO": TokenType.INTO, - 556 "IS": TokenType.IS, - 557 "ISNULL": TokenType.ISNULL, - 558 "JOIN": TokenType.JOIN, - 559 "KEEP": TokenType.KEEP, - 560 "LATERAL": TokenType.LATERAL, - 561 "LEFT": TokenType.LEFT, - 562 "LIKE": TokenType.LIKE, - 563 "LIMIT": TokenType.LIMIT, - 564 "LOAD": TokenType.LOAD, - 565 "LOCK": TokenType.LOCK, - 566 "MERGE": TokenType.MERGE, - 567 "NATURAL": TokenType.NATURAL, - 568 "NEXT": TokenType.NEXT, - 569 "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR, - 570 "NOT": TokenType.NOT, - 571 "NOTNULL": TokenType.NOTNULL, - 572 "NULL": TokenType.NULL, - 573 "OBJECT": TokenType.OBJECT, - 574 "OFFSET": TokenType.OFFSET, - 575 "ON": TokenType.ON, - 576 "OR": TokenType.OR, - 577 "ORDER BY": TokenType.ORDER_BY, - 578 "ORDINALITY": TokenType.ORDINALITY, - 579 "OUTER": TokenType.OUTER, - 580 "OVER": TokenType.OVER, - 581 "OVERLAPS": TokenType.OVERLAPS, - 582 "OVERWRITE": TokenType.OVERWRITE, - 583 "PARTITION": TokenType.PARTITION, - 584 "PARTITION BY": TokenType.PARTITION_BY, - 585 "PARTITIONED BY": TokenType.PARTITION_BY, - 586 "PARTITIONED_BY": TokenType.PARTITION_BY, - 587 "PERCENT": TokenType.PERCENT, - 588 "PIVOT": TokenType.PIVOT, - 589 "PRAGMA": TokenType.PRAGMA, - 590 "PRIMARY KEY": TokenType.PRIMARY_KEY, - 591 "PROCEDURE": TokenType.PROCEDURE, - 592 "QUALIFY": TokenType.QUALIFY, - 593 "RANGE": TokenType.RANGE, - 594 "RECURSIVE": TokenType.RECURSIVE, - 595 "REGEXP": TokenType.RLIKE, - 596 "REPLACE": TokenType.REPLACE, - 597 "REFERENCES": TokenType.REFERENCES, - 598 "RIGHT": TokenType.RIGHT, - 599 "RLIKE": TokenType.RLIKE, - 600 "ROLLBACK": TokenType.ROLLBACK, - 601 "ROLLUP": TokenType.ROLLUP, - 602 "ROW": TokenType.ROW, - 603 "ROWS": TokenType.ROWS, - 604 "SCHEMA": TokenType.SCHEMA, - 605 "SELECT": TokenType.SELECT, - 606 "SEMI": TokenType.SEMI, - 607 "SET": TokenType.SET, - 608 "SETTINGS": TokenType.SETTINGS, - 609 "SHOW": TokenType.SHOW, - 610 "SIMILAR TO": TokenType.SIMILAR_TO, - 611 "SOME": TokenType.SOME, - 612 "TABLE": TokenType.TABLE, - 613 "TABLESAMPLE": TokenType.TABLE_SAMPLE, - 614 "TEMP": TokenType.TEMPORARY, - 615 "TEMPORARY": TokenType.TEMPORARY, - 616 "THEN": TokenType.THEN, - 617 "TRUE": TokenType.TRUE, - 618 "UNION": TokenType.UNION, - 619 "UNNEST": TokenType.UNNEST, - 620 
"UNPIVOT": TokenType.UNPIVOT, - 621 "UPDATE": TokenType.UPDATE, - 622 "USE": TokenType.USE, - 623 "USING": TokenType.USING, - 624 "UUID": TokenType.UUID, - 625 "VALUES": TokenType.VALUES, - 626 "VIEW": TokenType.VIEW, - 627 "VOLATILE": TokenType.VOLATILE, - 628 "WHEN": TokenType.WHEN, - 629 "WHERE": TokenType.WHERE, - 630 "WINDOW": TokenType.WINDOW, - 631 "WITH": TokenType.WITH, - 632 "APPLY": TokenType.APPLY, - 633 "ARRAY": TokenType.ARRAY, - 634 "BIT": TokenType.BIT, - 635 "BOOL": TokenType.BOOLEAN, - 636 "BOOLEAN": TokenType.BOOLEAN, - 637 "BYTE": TokenType.TINYINT, - 638 "TINYINT": TokenType.TINYINT, - 639 "SHORT": TokenType.SMALLINT, - 640 "SMALLINT": TokenType.SMALLINT, - 641 "INT2": TokenType.SMALLINT, - 642 "INTEGER": TokenType.INT, - 643 "INT": TokenType.INT, - 644 "INT4": TokenType.INT, - 645 "LONG": TokenType.BIGINT, - 646 "BIGINT": TokenType.BIGINT, - 647 "INT8": TokenType.BIGINT, - 648 "DEC": TokenType.DECIMAL, - 649 "DECIMAL": TokenType.DECIMAL, - 650 "BIGDECIMAL": TokenType.BIGDECIMAL, - 651 "BIGNUMERIC": TokenType.BIGDECIMAL, - 652 "MAP": TokenType.MAP, - 653 "NULLABLE": TokenType.NULLABLE, - 654 "NUMBER": TokenType.DECIMAL, - 655 "NUMERIC": TokenType.DECIMAL, - 656 "FIXED": TokenType.DECIMAL, - 657 "REAL": TokenType.FLOAT, - 658 "FLOAT": TokenType.FLOAT, - 659 "FLOAT4": TokenType.FLOAT, - 660 "FLOAT8": TokenType.DOUBLE, - 661 "DOUBLE": TokenType.DOUBLE, - 662 "DOUBLE PRECISION": TokenType.DOUBLE, - 663 "JSON": TokenType.JSON, - 664 "CHAR": TokenType.CHAR, - 665 "CHARACTER": TokenType.CHAR, - 666 "NCHAR": TokenType.NCHAR, - 667 "VARCHAR": TokenType.VARCHAR, - 668 "VARCHAR2": TokenType.VARCHAR, - 669 "NVARCHAR": TokenType.NVARCHAR, - 670 "NVARCHAR2": TokenType.NVARCHAR, - 671 "STR": TokenType.TEXT, - 672 "STRING": TokenType.TEXT, - 673 "TEXT": TokenType.TEXT, - 674 "CLOB": TokenType.TEXT, - 675 "LONGVARCHAR": TokenType.TEXT, - 676 "BINARY": TokenType.BINARY, - 677 "BLOB": TokenType.VARBINARY, - 678 "BYTEA": TokenType.VARBINARY, - 679 "VARBINARY": TokenType.VARBINARY, - 680 "TIME": TokenType.TIME, - 681 "TIMESTAMP": TokenType.TIMESTAMP, - 682 "TIMESTAMPTZ": TokenType.TIMESTAMPTZ, - 683 "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ, - 684 "DATE": TokenType.DATE, - 685 "DATETIME": TokenType.DATETIME, - 686 "INT4RANGE": TokenType.INT4RANGE, - 687 "INT4MULTIRANGE": TokenType.INT4MULTIRANGE, - 688 "INT8RANGE": TokenType.INT8RANGE, - 689 "INT8MULTIRANGE": TokenType.INT8MULTIRANGE, - 690 "NUMRANGE": TokenType.NUMRANGE, - 691 "NUMMULTIRANGE": TokenType.NUMMULTIRANGE, - 692 "TSRANGE": TokenType.TSRANGE, - 693 "TSMULTIRANGE": TokenType.TSMULTIRANGE, - 694 "TSTZRANGE": TokenType.TSTZRANGE, - 695 "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE, - 696 "DATERANGE": TokenType.DATERANGE, - 697 "DATEMULTIRANGE": TokenType.DATEMULTIRANGE, - 698 "UNIQUE": TokenType.UNIQUE, - 699 "STRUCT": TokenType.STRUCT, - 700 "VARIANT": TokenType.VARIANT, - 701 "ALTER": TokenType.ALTER, - 702 "ANALYZE": TokenType.COMMAND, - 703 "CALL": TokenType.COMMAND, - 704 "COMMENT": TokenType.COMMENT, - 705 "COPY": TokenType.COMMAND, - 706 "EXPLAIN": TokenType.COMMAND, - 707 "GRANT": TokenType.COMMAND, - 708 "OPTIMIZE": TokenType.COMMAND, - 709 "PREPARE": TokenType.COMMAND, - 710 "TRUNCATE": TokenType.COMMAND, - 711 "VACUUM": TokenType.COMMAND, - 712 } - 713 - 714 WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = { - 715 " ": TokenType.SPACE, - 716 "\t": TokenType.SPACE, - 717 "\n": TokenType.BREAK, - 718 "\r": TokenType.BREAK, - 719 "\r\n": TokenType.BREAK, + 307 + 308class Token: + 309 __slots__ = ("token_type", "text", "line", 
"col", "start", "end", "comments") + 310 + 311 @classmethod + 312 def number(cls, number: int) -> Token: + 313 """Returns a NUMBER token with `number` as its text.""" + 314 return cls(TokenType.NUMBER, str(number)) + 315 + 316 @classmethod + 317 def string(cls, string: str) -> Token: + 318 """Returns a STRING token with `string` as its text.""" + 319 return cls(TokenType.STRING, string) + 320 + 321 @classmethod + 322 def identifier(cls, identifier: str) -> Token: + 323 """Returns an IDENTIFIER token with `identifier` as its text.""" + 324 return cls(TokenType.IDENTIFIER, identifier) + 325 + 326 @classmethod + 327 def var(cls, var: str) -> Token: + 328 """Returns an VAR token with `var` as its text.""" + 329 return cls(TokenType.VAR, var) + 330 + 331 def __init__( + 332 self, + 333 token_type: TokenType, + 334 text: str, + 335 line: int = 1, + 336 col: int = 1, + 337 start: int = 0, + 338 end: int = 0, + 339 comments: t.List[str] = [], + 340 ) -> None: + 341 """Token initializer. + 342 + 343 Args: + 344 token_type: The TokenType Enum. + 345 text: The text of the token. + 346 line: The line that the token ends on. + 347 col: The column that the token ends on. + 348 start: The start index of the token. + 349 end: The ending index of the token. + 350 comments: The comments to attach to the token. + 351 """ + 352 self.token_type = token_type + 353 self.text = text + 354 self.line = line + 355 self.col = col + 356 self.start = start + 357 self.end = end + 358 self.comments = comments + 359 + 360 def __repr__(self) -> str: + 361 attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__) + 362 return f"<Token {attributes}>" + 363 + 364 + 365class _Tokenizer(type): + 366 def __new__(cls, clsname, bases, attrs): + 367 klass = super().__new__(cls, clsname, bases, attrs) + 368 + 369 def _convert_quotes(arr: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]: + 370 return dict( + 371 (item, item) if isinstance(item, str) else (item[0], item[1]) for item in arr + 372 ) + 373 + 374 def _quotes_to_format( + 375 token_type: TokenType, arr: t.List[str | t.Tuple[str, str]] + 376 ) -> t.Dict[str, t.Tuple[str, TokenType]]: + 377 return {k: (v, token_type) for k, v in _convert_quotes(arr).items()} + 378 + 379 klass._QUOTES = _convert_quotes(klass.QUOTES) + 380 klass._IDENTIFIERS = _convert_quotes(klass.IDENTIFIERS) + 381 + 382 klass._FORMAT_STRINGS = { + 383 **{ + 384 p + s: (e, TokenType.NATIONAL_STRING) + 385 for s, e in klass._QUOTES.items() + 386 for p in ("n", "N") + 387 }, + 388 **_quotes_to_format(TokenType.BIT_STRING, klass.BIT_STRINGS), + 389 **_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS), + 390 **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS), + 391 **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS), + 392 } + 393 + 394 klass._STRING_ESCAPES = set(klass.STRING_ESCAPES) + 395 klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES) + 396 klass._COMMENTS = { + 397 **dict( + 398 (comment, None) if isinstance(comment, str) else (comment[0], comment[1]) + 399 for comment in klass.COMMENTS + 400 ), + 401 "{#": "#}", # Ensure Jinja comments are tokenized correctly in all dialects + 402 } + 403 + 404 klass._KEYWORD_TRIE = new_trie( + 405 key.upper() + 406 for key in ( + 407 *klass.KEYWORDS, + 408 *klass._COMMENTS, + 409 *klass._QUOTES, + 410 *klass._FORMAT_STRINGS, + 411 ) + 412 if " " in key or any(single in key for single in klass.SINGLE_TOKENS) + 413 ) + 414 + 415 return klass + 416 + 417 + 418class Tokenizer(metaclass=_Tokenizer): + 419 
SINGLE_TOKENS = { + 420 "(": TokenType.L_PAREN, + 421 ")": TokenType.R_PAREN, + 422 "[": TokenType.L_BRACKET, + 423 "]": TokenType.R_BRACKET, + 424 "{": TokenType.L_BRACE, + 425 "}": TokenType.R_BRACE, + 426 "&": TokenType.AMP, + 427 "^": TokenType.CARET, + 428 ":": TokenType.COLON, + 429 ",": TokenType.COMMA, + 430 ".": TokenType.DOT, + 431 "-": TokenType.DASH, + 432 "=": TokenType.EQ, + 433 ">": TokenType.GT, + 434 "<": TokenType.LT, + 435 "%": TokenType.MOD, + 436 "!": TokenType.NOT, + 437 "|": TokenType.PIPE, + 438 "+": TokenType.PLUS, + 439 ";": TokenType.SEMICOLON, + 440 "/": TokenType.SLASH, + 441 "\\": TokenType.BACKSLASH, + 442 "*": TokenType.STAR, + 443 "~": TokenType.TILDA, + 444 "?": TokenType.PLACEHOLDER, + 445 "@": TokenType.PARAMETER, + 446 # used for breaking a var like x'y' but nothing else + 447 # the token type doesn't matter + 448 "'": TokenType.QUOTE, + 449 "`": TokenType.IDENTIFIER, + 450 '"': TokenType.IDENTIFIER, + 451 "#": TokenType.HASH, + 452 } + 453 + 454 BIT_STRINGS: t.List[str | t.Tuple[str, str]] = [] + 455 BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = [] + 456 HEX_STRINGS: t.List[str | t.Tuple[str, str]] = [] + 457 RAW_STRINGS: t.List[str | t.Tuple[str, str]] = [] + 458 IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"'] + 459 IDENTIFIER_ESCAPES = ['"'] + 460 QUOTES: t.List[t.Tuple[str, str] | str] = ["'"] + 461 STRING_ESCAPES = ["'"] + 462 VAR_SINGLE_TOKENS: t.Set[str] = set() + 463 + 464 # Autofilled + 465 IDENTIFIERS_CAN_START_WITH_DIGIT: bool = False + 466 + 467 _COMMENTS: t.Dict[str, str] = {} + 468 _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {} + 469 _IDENTIFIERS: t.Dict[str, str] = {} + 470 _IDENTIFIER_ESCAPES: t.Set[str] = set() + 471 _QUOTES: t.Dict[str, str] = {} + 472 _STRING_ESCAPES: t.Set[str] = set() + 473 _KEYWORD_TRIE: t.Dict = {} + 474 + 475 KEYWORDS: t.Dict[str, TokenType] = { + 476 **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")}, + 477 **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")}, + 478 **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")}, + 479 **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")}, + 480 "/*+": TokenType.HINT, + 481 "==": TokenType.EQ, + 482 "::": TokenType.DCOLON, + 483 "||": TokenType.DPIPE, + 484 ">=": TokenType.GTE, + 485 "<=": TokenType.LTE, + 486 "<>": TokenType.NEQ, + 487 "!=": TokenType.NEQ, + 488 "<=>": TokenType.NULLSAFE_EQ, + 489 "->": TokenType.ARROW, + 490 "->>": TokenType.DARROW, + 491 "=>": TokenType.FARROW, + 492 "#>": TokenType.HASH_ARROW, + 493 "#>>": TokenType.DHASH_ARROW, + 494 "<->": TokenType.LR_ARROW, + 495 "&&": TokenType.DAMP, + 496 "ALL": TokenType.ALL, + 497 "ALWAYS": TokenType.ALWAYS, + 498 "AND": TokenType.AND, + 499 "ANTI": TokenType.ANTI, + 500 "ANY": TokenType.ANY, + 501 "ASC": TokenType.ASC, + 502 "AS": TokenType.ALIAS, + 503 "ASOF": TokenType.ASOF, + 504 "AUTOINCREMENT": TokenType.AUTO_INCREMENT, + 505 "AUTO_INCREMENT": TokenType.AUTO_INCREMENT, + 506 "BEGIN": TokenType.BEGIN, + 507 "BETWEEN": TokenType.BETWEEN, + 508 "CACHE": TokenType.CACHE, + 509 "UNCACHE": TokenType.UNCACHE, + 510 "CASE": TokenType.CASE, + 511 "CHARACTER SET": TokenType.CHARACTER_SET, + 512 "COLLATE": TokenType.COLLATE, + 513 "COLUMN": TokenType.COLUMN, + 514 "COMMIT": TokenType.COMMIT, + 515 "CONSTRAINT": TokenType.CONSTRAINT, + 516 "CREATE": TokenType.CREATE, + 517 "CROSS": TokenType.CROSS, + 518 "CUBE": TokenType.CUBE, + 519 "CURRENT_DATE": TokenType.CURRENT_DATE, + 520 "CURRENT_TIME": TokenType.CURRENT_TIME, + 521 
"CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP, + 522 "CURRENT_USER": TokenType.CURRENT_USER, + 523 "DATABASE": TokenType.DATABASE, + 524 "DEFAULT": TokenType.DEFAULT, + 525 "DELETE": TokenType.DELETE, + 526 "DESC": TokenType.DESC, + 527 "DESCRIBE": TokenType.DESCRIBE, + 528 "DISTINCT": TokenType.DISTINCT, + 529 "DIV": TokenType.DIV, + 530 "DROP": TokenType.DROP, + 531 "ELSE": TokenType.ELSE, + 532 "END": TokenType.END, + 533 "ESCAPE": TokenType.ESCAPE, + 534 "EXCEPT": TokenType.EXCEPT, + 535 "EXECUTE": TokenType.EXECUTE, + 536 "EXISTS": TokenType.EXISTS, + 537 "FALSE": TokenType.FALSE, + 538 "FETCH": TokenType.FETCH, + 539 "FILTER": TokenType.FILTER, + 540 "FIRST": TokenType.FIRST, + 541 "FULL": TokenType.FULL, + 542 "FUNCTION": TokenType.FUNCTION, + 543 "FOR": TokenType.FOR, + 544 "FOREIGN KEY": TokenType.FOREIGN_KEY, + 545 "FORMAT": TokenType.FORMAT, + 546 "FROM": TokenType.FROM, + 547 "GEOGRAPHY": TokenType.GEOGRAPHY, + 548 "GEOMETRY": TokenType.GEOMETRY, + 549 "GLOB": TokenType.GLOB, + 550 "GROUP BY": TokenType.GROUP_BY, + 551 "GROUPING SETS": TokenType.GROUPING_SETS, + 552 "HAVING": TokenType.HAVING, + 553 "IF": TokenType.IF, + 554 "ILIKE": TokenType.ILIKE, + 555 "IN": TokenType.IN, + 556 "INDEX": TokenType.INDEX, + 557 "INET": TokenType.INET, + 558 "INNER": TokenType.INNER, + 559 "INSERT": TokenType.INSERT, + 560 "INTERVAL": TokenType.INTERVAL, + 561 "INTERSECT": TokenType.INTERSECT, + 562 "INTO": TokenType.INTO, + 563 "IS": TokenType.IS, + 564 "ISNULL": TokenType.ISNULL, + 565 "JOIN": TokenType.JOIN, + 566 "KEEP": TokenType.KEEP, + 567 "LATERAL": TokenType.LATERAL, + 568 "LEFT": TokenType.LEFT, + 569 "LIKE": TokenType.LIKE, + 570 "LIMIT": TokenType.LIMIT, + 571 "LOAD": TokenType.LOAD, + 572 "LOCK": TokenType.LOCK, + 573 "MERGE": TokenType.MERGE, + 574 "NATURAL": TokenType.NATURAL, + 575 "NEXT": TokenType.NEXT, + 576 "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR, + 577 "NOT": TokenType.NOT, + 578 "NOTNULL": TokenType.NOTNULL, + 579 "NULL": TokenType.NULL, + 580 "OBJECT": TokenType.OBJECT, + 581 "OFFSET": TokenType.OFFSET, + 582 "ON": TokenType.ON, + 583 "OR": TokenType.OR, + 584 "ORDER BY": TokenType.ORDER_BY, + 585 "ORDINALITY": TokenType.ORDINALITY, + 586 "OUTER": TokenType.OUTER, + 587 "OVER": TokenType.OVER, + 588 "OVERLAPS": TokenType.OVERLAPS, + 589 "OVERWRITE": TokenType.OVERWRITE, + 590 "PARTITION": TokenType.PARTITION, + 591 "PARTITION BY": TokenType.PARTITION_BY, + 592 "PARTITIONED BY": TokenType.PARTITION_BY, + 593 "PARTITIONED_BY": TokenType.PARTITION_BY, + 594 "PERCENT": TokenType.PERCENT, + 595 "PIVOT": TokenType.PIVOT, + 596 "PRAGMA": TokenType.PRAGMA, + 597 "PRIMARY KEY": TokenType.PRIMARY_KEY, + 598 "PROCEDURE": TokenType.PROCEDURE, + 599 "QUALIFY": TokenType.QUALIFY, + 600 "RANGE": TokenType.RANGE, + 601 "RECURSIVE": TokenType.RECURSIVE, + 602 "REGEXP": TokenType.RLIKE, + 603 "REPLACE": TokenType.REPLACE, + 604 "RETURNING": TokenType.RETURNING, + 605 "REFERENCES": TokenType.REFERENCES, + 606 "RIGHT": TokenType.RIGHT, + 607 "RLIKE": TokenType.RLIKE, + 608 "ROLLBACK": TokenType.ROLLBACK, + 609 "ROLLUP": TokenType.ROLLUP, + 610 "ROW": TokenType.ROW, + 611 "ROWS": TokenType.ROWS, + 612 "SCHEMA": TokenType.SCHEMA, + 613 "SELECT": TokenType.SELECT, + 614 "SEMI": TokenType.SEMI, + 615 "SET": TokenType.SET, + 616 "SETTINGS": TokenType.SETTINGS, + 617 "SHOW": TokenType.SHOW, + 618 "SIMILAR TO": TokenType.SIMILAR_TO, + 619 "SOME": TokenType.SOME, + 620 "TABLE": TokenType.TABLE, + 621 "TABLESAMPLE": TokenType.TABLE_SAMPLE, + 622 "TEMP": TokenType.TEMPORARY, + 623 "TEMPORARY": 
TokenType.TEMPORARY, + 624 "THEN": TokenType.THEN, + 625 "TRUE": TokenType.TRUE, + 626 "UNION": TokenType.UNION, + 627 "UNNEST": TokenType.UNNEST, + 628 "UNPIVOT": TokenType.UNPIVOT, + 629 "UPDATE": TokenType.UPDATE, + 630 "USE": TokenType.USE, + 631 "USING": TokenType.USING, + 632 "UUID": TokenType.UUID, + 633 "VALUES": TokenType.VALUES, + 634 "VIEW": TokenType.VIEW, + 635 "VOLATILE": TokenType.VOLATILE, + 636 "WHEN": TokenType.WHEN, + 637 "WHERE": TokenType.WHERE, + 638 "WINDOW": TokenType.WINDOW, + 639 "WITH": TokenType.WITH, + 640 "APPLY": TokenType.APPLY, + 641 "ARRAY": TokenType.ARRAY, + 642 "BIT": TokenType.BIT, + 643 "BOOL": TokenType.BOOLEAN, + 644 "BOOLEAN": TokenType.BOOLEAN, + 645 "BYTE": TokenType.TINYINT, + 646 "TINYINT": TokenType.TINYINT, + 647 "SHORT": TokenType.SMALLINT, + 648 "SMALLINT": TokenType.SMALLINT, + 649 "INT2": TokenType.SMALLINT, + 650 "INTEGER": TokenType.INT, + 651 "INT": TokenType.INT, + 652 "INT4": TokenType.INT, + 653 "LONG": TokenType.BIGINT, + 654 "BIGINT": TokenType.BIGINT, + 655 "INT8": TokenType.BIGINT, + 656 "DEC": TokenType.DECIMAL, + 657 "DECIMAL": TokenType.DECIMAL, + 658 "BIGDECIMAL": TokenType.BIGDECIMAL, + 659 "BIGNUMERIC": TokenType.BIGDECIMAL, + 660 "MAP": TokenType.MAP, + 661 "NULLABLE": TokenType.NULLABLE, + 662 "NUMBER": TokenType.DECIMAL, + 663 "NUMERIC": TokenType.DECIMAL, + 664 "FIXED": TokenType.DECIMAL, + 665 "REAL": TokenType.FLOAT, + 666 "FLOAT": TokenType.FLOAT, + 667 "FLOAT4": TokenType.FLOAT, + 668 "FLOAT8": TokenType.DOUBLE, + 669 "DOUBLE": TokenType.DOUBLE, + 670 "DOUBLE PRECISION": TokenType.DOUBLE, + 671 "JSON": TokenType.JSON, + 672 "CHAR": TokenType.CHAR, + 673 "CHARACTER": TokenType.CHAR, + 674 "NCHAR": TokenType.NCHAR, + 675 "VARCHAR": TokenType.VARCHAR, + 676 "VARCHAR2": TokenType.VARCHAR, + 677 "NVARCHAR": TokenType.NVARCHAR, + 678 "NVARCHAR2": TokenType.NVARCHAR, + 679 "STR": TokenType.TEXT, + 680 "STRING": TokenType.TEXT, + 681 "TEXT": TokenType.TEXT, + 682 "CLOB": TokenType.TEXT, + 683 "LONGVARCHAR": TokenType.TEXT, + 684 "BINARY": TokenType.BINARY, + 685 "BLOB": TokenType.VARBINARY, + 686 "BYTEA": TokenType.VARBINARY, + 687 "VARBINARY": TokenType.VARBINARY, + 688 "TIME": TokenType.TIME, + 689 "TIMESTAMP": TokenType.TIMESTAMP, + 690 "TIMESTAMPTZ": TokenType.TIMESTAMPTZ, + 691 "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ, + 692 "DATE": TokenType.DATE, + 693 "DATETIME": TokenType.DATETIME, + 694 "INT4RANGE": TokenType.INT4RANGE, + 695 "INT4MULTIRANGE": TokenType.INT4MULTIRANGE, + 696 "INT8RANGE": TokenType.INT8RANGE, + 697 "INT8MULTIRANGE": TokenType.INT8MULTIRANGE, + 698 "NUMRANGE": TokenType.NUMRANGE, + 699 "NUMMULTIRANGE": TokenType.NUMMULTIRANGE, + 700 "TSRANGE": TokenType.TSRANGE, + 701 "TSMULTIRANGE": TokenType.TSMULTIRANGE, + 702 "TSTZRANGE": TokenType.TSTZRANGE, + 703 "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE, + 704 "DATERANGE": TokenType.DATERANGE, + 705 "DATEMULTIRANGE": TokenType.DATEMULTIRANGE, + 706 "UNIQUE": TokenType.UNIQUE, + 707 "STRUCT": TokenType.STRUCT, + 708 "VARIANT": TokenType.VARIANT, + 709 "ALTER": TokenType.ALTER, + 710 "ANALYZE": TokenType.COMMAND, + 711 "CALL": TokenType.COMMAND, + 712 "COMMENT": TokenType.COMMENT, + 713 "COPY": TokenType.COMMAND, + 714 "EXPLAIN": TokenType.COMMAND, + 715 "GRANT": TokenType.COMMAND, + 716 "OPTIMIZE": TokenType.COMMAND, + 717 "PREPARE": TokenType.COMMAND, + 718 "TRUNCATE": TokenType.COMMAND, + 719 "VACUUM": TokenType.COMMAND, 720 } 721 - 722 COMMANDS = { - 723 TokenType.COMMAND, - 724 TokenType.EXECUTE, - 725 TokenType.FETCH, - 726 TokenType.SHOW, - 727 } - 728 - 
729 COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN} - 730 - 731 # handle numeric literals like in hive (3L = BIGINT) - 732 NUMERIC_LITERALS: t.Dict[str, str] = {} - 733 ENCODE: t.Optional[str] = None - 734 - 735 COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")] - 736 KEYWORD_TRIE: t.Dict = {} # autofilled - 737 - 738 __slots__ = ( - 739 "sql", - 740 "size", - 741 "tokens", - 742 "_start", - 743 "_current", - 744 "_line", - 745 "_col", - 746 "_comments", - 747 "_char", - 748 "_end", - 749 "_peek", - 750 "_prev_token_line", - 751 "identifiers_can_start_with_digit", - 752 ) - 753 - 754 def __init__(self) -> None: - 755 self.reset() - 756 - 757 def reset(self) -> None: - 758 self.sql = "" - 759 self.size = 0 - 760 self.tokens: t.List[Token] = [] - 761 self._start = 0 - 762 self._current = 0 - 763 self._line = 1 - 764 self._col = 0 - 765 self._comments: t.List[str] = [] - 766 - 767 self._char = "" - 768 self._end = False - 769 self._peek = "" - 770 self._prev_token_line = -1 - 771 - 772 def tokenize(self, sql: str) -> t.List[Token]: - 773 """Returns a list of tokens corresponding to the SQL string `sql`.""" - 774 self.reset() - 775 self.sql = sql - 776 self.size = len(sql) + 722 WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = { + 723 " ": TokenType.SPACE, + 724 "\t": TokenType.SPACE, + 725 "\n": TokenType.BREAK, + 726 "\r": TokenType.BREAK, + 727 "\r\n": TokenType.BREAK, + 728 } + 729 + 730 COMMANDS = { + 731 TokenType.COMMAND, + 732 TokenType.EXECUTE, + 733 TokenType.FETCH, + 734 TokenType.SHOW, + 735 } + 736 + 737 COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN} + 738 + 739 # handle numeric literals like in hive (3L = BIGINT) + 740 NUMERIC_LITERALS: t.Dict[str, str] = {} + 741 ENCODE: t.Optional[str] = None + 742 + 743 COMMENTS = ["--", ("/*", "*/")] + 744 + 745 __slots__ = ( + 746 "sql", + 747 "size", + 748 "tokens", + 749 "_start", + 750 "_current", + 751 "_line", + 752 "_col", + 753 "_comments", + 754 "_char", + 755 "_end", + 756 "_peek", + 757 "_prev_token_line", + 758 ) + 759 + 760 def __init__(self) -> None: + 761 self.reset() + 762 + 763 def reset(self) -> None: + 764 self.sql = "" + 765 self.size = 0 + 766 self.tokens: t.List[Token] = [] + 767 self._start = 0 + 768 self._current = 0 + 769 self._line = 1 + 770 self._col = 0 + 771 self._comments: t.List[str] = [] + 772 + 773 self._char = "" + 774 self._end = False + 775 self._peek = "" + 776 self._prev_token_line = -1 777 - 778 try: - 779 self._scan() - 780 except Exception as e: - 781 start = max(self._current - 50, 0) - 782 end = min(self._current + 50, self.size - 1) - 783 context = self.sql[start:end] - 784 raise ValueError(f"Error tokenizing '{context}'") from e - 785 - 786 return self.tokens - 787 - 788 def _scan(self, until: t.Optional[t.Callable] = None) -> None: - 789 while self.size and not self._end: - 790 self._start = self._current - 791 self._advance() - 792 - 793 if self._char is None: - 794 break - 795 - 796 if self._char not in self.WHITE_SPACE: - 797 if self._char.isdigit(): - 798 self._scan_number() - 799 elif self._char in self._IDENTIFIERS: - 800 self._scan_identifier(self._IDENTIFIERS[self._char]) - 801 else: - 802 self._scan_keywords() - 803 - 804 if until and until(): - 805 break - 806 - 807 if self.tokens and self._comments: - 808 self.tokens[-1].comments.extend(self._comments) + 778 def tokenize(self, sql: str) -> t.List[Token]: + 779 """Returns a list of tokens corresponding to the SQL string `sql`.""" + 780 self.reset() + 781 self.sql = sql + 782 self.size = len(sql) + 783 + 784 
try: + 785 self._scan() + 786 except Exception as e: + 787 start = max(self._current - 50, 0) + 788 end = min(self._current + 50, self.size - 1) + 789 context = self.sql[start:end] + 790 raise ValueError(f"Error tokenizing '{context}'") from e + 791 + 792 return self.tokens + 793 + 794 def _scan(self, until: t.Optional[t.Callable] = None) -> None: + 795 while self.size and not self._end: + 796 self._start = self._current + 797 self._advance() + 798 + 799 if self._char is None: + 800 break + 801 + 802 if self._char not in self.WHITE_SPACE: + 803 if self._char.isdigit(): + 804 self._scan_number() + 805 elif self._char in self._IDENTIFIERS: + 806 self._scan_identifier(self._IDENTIFIERS[self._char]) + 807 else: + 808 self._scan_keywords() 809 - 810 def _chars(self, size: int) -> str: - 811 if size == 1: - 812 return self._char - 813 - 814 start = self._current - 1 - 815 end = start + size - 816 - 817 return self.sql[start:end] if end <= self.size else "" - 818 - 819 def _advance(self, i: int = 1, alnum: bool = False) -> None: - 820 if self.WHITE_SPACE.get(self._char) is TokenType.BREAK: - 821 self._col = 1 - 822 self._line += 1 - 823 else: - 824 self._col += i - 825 - 826 self._current += i - 827 self._end = self._current >= self.size - 828 self._char = self.sql[self._current - 1] - 829 self._peek = "" if self._end else self.sql[self._current] - 830 - 831 if alnum and self._char.isalnum(): - 832 # Here we use local variables instead of attributes for better performance - 833 _col = self._col - 834 _current = self._current - 835 _end = self._end - 836 _peek = self._peek - 837 - 838 while _peek.isalnum(): - 839 _col += 1 - 840 _current += 1 - 841 _end = _current >= self.size - 842 _peek = "" if _end else self.sql[_current] + 810 if until and until(): + 811 break + 812 + 813 if self.tokens and self._comments: + 814 self.tokens[-1].comments.extend(self._comments) + 815 + 816 def _chars(self, size: int) -> str: + 817 if size == 1: + 818 return self._char + 819 + 820 start = self._current - 1 + 821 end = start + size + 822 + 823 return self.sql[start:end] if end <= self.size else "" + 824 + 825 def _advance(self, i: int = 1, alnum: bool = False) -> None: + 826 if self.WHITE_SPACE.get(self._char) is TokenType.BREAK: + 827 self._col = 1 + 828 self._line += 1 + 829 else: + 830 self._col += i + 831 + 832 self._current += i + 833 self._end = self._current >= self.size + 834 self._char = self.sql[self._current - 1] + 835 self._peek = "" if self._end else self.sql[self._current] + 836 + 837 if alnum and self._char.isalnum(): + 838 # Here we use local variables instead of attributes for better performance + 839 _col = self._col + 840 _current = self._current + 841 _end = self._end + 842 _peek = self._peek 843 - 844 self._col = _col - 845 self._current = _current - 846 self._end = _end - 847 self._peek = _peek - 848 self._char = self.sql[_current - 1] + 844 while _peek.isalnum(): + 845 _col += 1 + 846 _current += 1 + 847 _end = _current >= self.size + 848 _peek = "" if _end else self.sql[_current] 849 - 850 @property - 851 def _text(self) -> str: - 852 return self.sql[self._start : self._current] - 853 - 854 def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None: - 855 self._prev_token_line = self._line - 856 self.tokens.append( - 857 Token( - 858 token_type, - 859 text=self._text if text is None else text, - 860 line=self._line, - 861 col=self._col, - 862 start=self._start, - 863 end=self._current - 1, - 864 comments=self._comments, - 865 ) - 866 ) - 867 self._comments = [] - 868 - 869 
# If we have either a semicolon or a begin token before the command's token, we'll parse - 870 # whatever follows the command's token as a string - 871 if ( - 872 token_type in self.COMMANDS - 873 and self._peek != ";" - 874 and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS) - 875 ): - 876 start = self._current - 877 tokens = len(self.tokens) - 878 self._scan(lambda: self._peek == ";") - 879 self.tokens = self.tokens[:tokens] - 880 text = self.sql[start : self._current].strip() - 881 if text: - 882 self._add(TokenType.STRING, text) - 883 - 884 def _scan_keywords(self) -> None: - 885 size = 0 - 886 word = None - 887 chars = self._text - 888 char = chars - 889 prev_space = False - 890 skip = False - 891 trie = self.KEYWORD_TRIE - 892 single_token = char in self.SINGLE_TOKENS - 893 - 894 while chars: - 895 if skip: - 896 result = 1 - 897 else: - 898 result, trie = in_trie(trie, char.upper()) - 899 - 900 if result == 0: - 901 break - 902 if result == 2: - 903 word = chars - 904 - 905 size += 1 - 906 end = self._current - 1 + size - 907 - 908 if end < self.size: - 909 char = self.sql[end] - 910 single_token = single_token or char in self.SINGLE_TOKENS - 911 is_space = char in self.WHITE_SPACE - 912 - 913 if not is_space or not prev_space: - 914 if is_space: - 915 char = " " - 916 chars += char - 917 prev_space = is_space - 918 skip = False - 919 else: - 920 skip = True - 921 else: - 922 char = "" - 923 chars = " " + 850 self._col = _col + 851 self._current = _current + 852 self._end = _end + 853 self._peek = _peek + 854 self._char = self.sql[_current - 1] + 855 + 856 @property + 857 def _text(self) -> str: + 858 return self.sql[self._start : self._current] + 859 + 860 def peek(self, i: int = 0) -> str: + 861 i = self._current + i + 862 if i < self.size: + 863 return self.sql[i] + 864 return "" + 865 + 866 def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None: + 867 self._prev_token_line = self._line + 868 self.tokens.append( + 869 Token( + 870 token_type, + 871 text=self._text if text is None else text, + 872 line=self._line, + 873 col=self._col, + 874 start=self._start, + 875 end=self._current - 1, + 876 comments=self._comments, + 877 ) + 878 ) + 879 self._comments = [] + 880 + 881 # If we have either a semicolon or a begin token before the command's token, we'll parse + 882 # whatever follows the command's token as a string + 883 if ( + 884 token_type in self.COMMANDS + 885 and self._peek != ";" + 886 and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS) + 887 ): + 888 start = self._current + 889 tokens = len(self.tokens) + 890 self._scan(lambda: self._peek == ";") + 891 self.tokens = self.tokens[:tokens] + 892 text = self.sql[start : self._current].strip() + 893 if text: + 894 self._add(TokenType.STRING, text) + 895 + 896 def _scan_keywords(self) -> None: + 897 size = 0 + 898 word = None + 899 chars = self._text + 900 char = chars + 901 prev_space = False + 902 skip = False + 903 trie = self._KEYWORD_TRIE + 904 single_token = char in self.SINGLE_TOKENS + 905 + 906 while chars: + 907 if skip: + 908 result = 1 + 909 else: + 910 result, trie = in_trie(trie, char.upper()) + 911 + 912 if result == 0: + 913 break + 914 if result == 2: + 915 word = chars + 916 + 917 size += 1 + 918 end = self._current - 1 + size + 919 + 920 if end < self.size: + 921 char = self.sql[end] + 922 single_token = single_token or char in self.SINGLE_TOKENS + 923 is_space = char in self.WHITE_SPACE 924 - 925 word = None if not 
single_token and chars[-1] not in self.WHITE_SPACE else word - 926 - 927 if not word: - 928 if self._char in self.SINGLE_TOKENS: - 929 self._add(self.SINGLE_TOKENS[self._char], text=self._char) - 930 return - 931 self._scan_var() - 932 return - 933 - 934 if self._scan_string(word): - 935 return - 936 if self._scan_comment(word): - 937 return + 925 if not is_space or not prev_space: + 926 if is_space: + 927 char = " " + 928 chars += char + 929 prev_space = is_space + 930 skip = False + 931 else: + 932 skip = True + 933 else: + 934 char = "" + 935 chars = " " + 936 + 937 word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word 938 - 939 self._advance(size - 1) - 940 word = word.upper() - 941 self._add(self.KEYWORDS[word], text=word) - 942 - 943 def _scan_comment(self, comment_start: str) -> bool: - 944 if comment_start not in self._COMMENTS: - 945 return False - 946 - 947 comment_start_line = self._line - 948 comment_start_size = len(comment_start) - 949 comment_end = self._COMMENTS[comment_start] + 939 if not word: + 940 if self._char in self.SINGLE_TOKENS: + 941 self._add(self.SINGLE_TOKENS[self._char], text=self._char) + 942 return + 943 self._scan_var() + 944 return + 945 + 946 if self._scan_string(word): + 947 return + 948 if self._scan_comment(word): + 949 return 950 - 951 if comment_end: - 952 # Skip the comment's start delimiter - 953 self._advance(comment_start_size) + 951 self._advance(size - 1) + 952 word = word.upper() + 953 self._add(self.KEYWORDS[word], text=word) 954 - 955 comment_end_size = len(comment_end) - 956 while not self._end and self._chars(comment_end_size) != comment_end: - 957 self._advance(alnum=True) + 955 def _scan_comment(self, comment_start: str) -> bool: + 956 if comment_start not in self._COMMENTS: + 957 return False 958 - 959 self._comments.append(self._text[comment_start_size : -comment_end_size + 1]) - 960 self._advance(comment_end_size - 1) - 961 else: - 962 while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK: - 963 self._advance(alnum=True) - 964 self._comments.append(self._text[comment_start_size:]) - 965 - 966 # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding. - 967 # Multiple consecutive comments are preserved by appending them to the current comments list. - 968 if comment_start_line == self._prev_token_line: - 969 self.tokens[-1].comments.extend(self._comments) - 970 self._comments = [] - 971 self._prev_token_line = self._line - 972 - 973 return True - 974 - 975 def _scan_number(self) -> None: - 976 if self._char == "0": - 977 peek = self._peek.upper() - 978 if peek == "B": - 979 return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER) - 980 elif peek == "X": - 981 return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER) - 982 - 983 decimal = False - 984 scientific = 0 - 985 - 986 while True: - 987 if self._peek.isdigit(): - 988 self._advance() - 989 elif self._peek == "." 
and not decimal: - 990 decimal = True - 991 self._advance() - 992 elif self._peek in ("-", "+") and scientific == 1: - 993 scientific += 1 - 994 self._advance() - 995 elif self._peek.upper() == "E" and not scientific: - 996 scientific += 1 - 997 self._advance() - 998 elif self._peek.isidentifier(): - 999 number_text = self._text -1000 literal = "" -1001 -1002 while self._peek.strip() and self._peek not in self.SINGLE_TOKENS: -1003 literal += self._peek.upper() -1004 self._advance() -1005 -1006 token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal)) -1007 -1008 if token_type: -1009 self._add(TokenType.NUMBER, number_text) -1010 self._add(TokenType.DCOLON, "::") -1011 return self._add(token_type, literal) -1012 elif self.identifiers_can_start_with_digit: # type: ignore -1013 return self._add(TokenType.VAR) -1014 -1015 self._add(TokenType.NUMBER, number_text) -1016 return self._advance(-len(literal)) -1017 else: -1018 return self._add(TokenType.NUMBER) -1019 -1020 def _scan_bits(self) -> None: -1021 self._advance() -1022 value = self._extract_value() -1023 try: -1024 # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier -1025 int(value, 2) -1026 self._add(TokenType.BIT_STRING, value[2:]) # Drop the 0b -1027 except ValueError: -1028 self._add(TokenType.IDENTIFIER) -1029 -1030 def _scan_hex(self) -> None: -1031 self._advance() -1032 value = self._extract_value() -1033 try: -1034 # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier -1035 int(value, 16) -1036 self._add(TokenType.HEX_STRING, value[2:]) # Drop the 0x -1037 except ValueError: -1038 self._add(TokenType.IDENTIFIER) -1039 -1040 def _extract_value(self) -> str: -1041 while True: -1042 char = self._peek.strip() -1043 if char and char not in self.SINGLE_TOKENS: -1044 self._advance(alnum=True) -1045 else: -1046 break -1047 -1048 return self._text -1049 -1050 def _scan_string(self, start: str) -> bool: -1051 base = None -1052 token_type = TokenType.STRING -1053 -1054 if start in self._QUOTES: -1055 end = self._QUOTES[start] -1056 elif start in self._FORMAT_STRINGS: -1057 end, token_type = self._FORMAT_STRINGS[start] -1058 -1059 if token_type == TokenType.HEX_STRING: -1060 base = 16 -1061 elif token_type == TokenType.BIT_STRING: -1062 base = 2 -1063 else: -1064 return False + 959 comment_start_line = self._line + 960 comment_start_size = len(comment_start) + 961 comment_end = self._COMMENTS[comment_start] + 962 + 963 if comment_end: + 964 # Skip the comment's start delimiter + 965 self._advance(comment_start_size) + 966 + 967 comment_end_size = len(comment_end) + 968 while not self._end and self._chars(comment_end_size) != comment_end: + 969 self._advance(alnum=True) + 970 + 971 self._comments.append(self._text[comment_start_size : -comment_end_size + 1]) + 972 self._advance(comment_end_size - 1) + 973 else: + 974 while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK: + 975 self._advance(alnum=True) + 976 self._comments.append(self._text[comment_start_size:]) + 977 + 978 # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding. + 979 # Multiple consecutive comments are preserved by appending them to the current comments list. 
+ 980 if comment_start_line == self._prev_token_line: + 981 self.tokens[-1].comments.extend(self._comments) + 982 self._comments = [] + 983 self._prev_token_line = self._line + 984 + 985 return True + 986 + 987 def _scan_number(self) -> None: + 988 if self._char == "0": + 989 peek = self._peek.upper() + 990 if peek == "B": + 991 return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER) + 992 elif peek == "X": + 993 return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER) + 994 + 995 decimal = False + 996 scientific = 0 + 997 + 998 while True: + 999 if self._peek.isdigit(): +1000 self._advance() +1001 elif self._peek == "." and not decimal: +1002 after = self.peek(1) +1003 if after.isdigit() or not after.isalpha(): +1004 decimal = True +1005 self._advance() +1006 else: +1007 return self._add(TokenType.VAR) +1008 elif self._peek in ("-", "+") and scientific == 1: +1009 scientific += 1 +1010 self._advance() +1011 elif self._peek.upper() == "E" and not scientific: +1012 scientific += 1 +1013 self._advance() +1014 elif self._peek.isidentifier(): +1015 number_text = self._text +1016 literal = "" +1017 +1018 while self._peek.strip() and self._peek not in self.SINGLE_TOKENS: +1019 literal += self._peek.upper() +1020 self._advance() +1021 +1022 token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal, "")) +1023 +1024 if token_type: +1025 self._add(TokenType.NUMBER, number_text) +1026 self._add(TokenType.DCOLON, "::") +1027 return self._add(token_type, literal) +1028 elif self.IDENTIFIERS_CAN_START_WITH_DIGIT: +1029 return self._add(TokenType.VAR) +1030 +1031 self._add(TokenType.NUMBER, number_text) +1032 return self._advance(-len(literal)) +1033 else: +1034 return self._add(TokenType.NUMBER) +1035 +1036 def _scan_bits(self) -> None: +1037 self._advance() +1038 value = self._extract_value() +1039 try: +1040 # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier +1041 int(value, 2) +1042 self._add(TokenType.BIT_STRING, value[2:]) # Drop the 0b +1043 except ValueError: +1044 self._add(TokenType.IDENTIFIER) +1045 +1046 def _scan_hex(self) -> None: +1047 self._advance() +1048 value = self._extract_value() +1049 try: +1050 # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier +1051 int(value, 16) +1052 self._add(TokenType.HEX_STRING, value[2:]) # Drop the 0x +1053 except ValueError: +1054 self._add(TokenType.IDENTIFIER) +1055 +1056 def _extract_value(self) -> str: +1057 while True: +1058 char = self._peek.strip() +1059 if char and char not in self.SINGLE_TOKENS: +1060 self._advance(alnum=True) +1061 else: +1062 break +1063 +1064 return self._text 1065 -1066 self._advance(len(start)) -1067 text = self._extract_string(end) -1068 -1069 if base: -1070 try: -1071 int(text, base) -1072 except: -1073 raise RuntimeError( -1074 f"Numeric string contains invalid characters from {self._line}:{self._start}" -1075 ) -1076 else: -1077 text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text -1078 -1079 self._add(token_type, text) -1080 return True +1066 def _scan_string(self, start: str) -> bool: +1067 base = None +1068 token_type = TokenType.STRING +1069 +1070 if start in self._QUOTES: +1071 end = self._QUOTES[start] +1072 elif start in self._FORMAT_STRINGS: +1073 end, token_type = self._FORMAT_STRINGS[start] +1074 +1075 if token_type == TokenType.HEX_STRING: +1076 base = 16 +1077 elif token_type == TokenType.BIT_STRING: +1078 base = 2 +1079 else: +1080 return False 1081 -1082 def 
_scan_identifier(self, identifier_end: str) -> None: -1083 self._advance() -1084 text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES) -1085 self._add(TokenType.IDENTIFIER, text) -1086 -1087 def _scan_var(self) -> None: -1088 while True: -1089 char = self._peek.strip() -1090 if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS): -1091 self._advance(alnum=True) -1092 else: -1093 break +1082 self._advance(len(start)) +1083 text = self._extract_string(end) +1084 +1085 if base: +1086 try: +1087 int(text, base) +1088 except: +1089 raise RuntimeError( +1090 f"Numeric string contains invalid characters from {self._line}:{self._start}" +1091 ) +1092 else: +1093 text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text 1094 -1095 self._add( -1096 TokenType.VAR -1097 if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER -1098 else self.KEYWORDS.get(self._text.upper(), TokenType.VAR) -1099 ) -1100 -1101 def _extract_string(self, delimiter: str, escapes=None) -> str: -1102 text = "" -1103 delim_size = len(delimiter) -1104 escapes = self._STRING_ESCAPES if escapes is None else escapes -1105 -1106 while True: -1107 if self._char in escapes and (self._peek == delimiter or self._peek in escapes): -1108 if self._peek == delimiter: -1109 text += self._peek -1110 else: -1111 text += self._char + self._peek -1112 -1113 if self._current + 1 < self.size: -1114 self._advance(2) -1115 else: -1116 raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}") -1117 else: -1118 if self._chars(delim_size) == delimiter: -1119 if delim_size > 1: -1120 self._advance(delim_size - 1) -1121 break -1122 -1123 if self._end: -1124 raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}") -1125 -1126 current = self._current - 1 -1127 self._advance(alnum=True) -1128 text += self.sql[current : self._current - 1] -1129 -1130 return text +1095 self._add(token_type, text) +1096 return True +1097 +1098 def _scan_identifier(self, identifier_end: str) -> None: +1099 self._advance() +1100 text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES) +1101 self._add(TokenType.IDENTIFIER, text) +1102 +1103 def _scan_var(self) -> None: +1104 while True: +1105 char = self._peek.strip() +1106 if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS): +1107 self._advance(alnum=True) +1108 else: +1109 break +1110 +1111 self._add( +1112 TokenType.VAR +1113 if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER +1114 else self.KEYWORDS.get(self._text.upper(), TokenType.VAR) +1115 ) +1116 +1117 def _extract_string(self, delimiter: str, escapes=None) -> str: +1118 text = "" +1119 delim_size = len(delimiter) +1120 escapes = self._STRING_ESCAPES if escapes is None else escapes +1121 +1122 while True: +1123 if self._char in escapes and (self._peek == delimiter or self._peek in escapes): +1124 if self._peek == delimiter: +1125 text += self._peek +1126 else: +1127 text += self._char + self._peek +1128 +1129 if self._current + 1 < self.size: +1130 self._advance(2) +1131 else: +1132 raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}") +1133 else: +1134 if self._chars(delim_size) == delimiter: +1135 if delim_size > 1: +1136 self._advance(delim_size - 1) +1137 break +1138 +1139 if self._end: +1140 raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}") +1141 +1142 current = self._current - 1 +1143 self._advance(alnum=True) +1144 text += self.sql[current : 
self._current - 1] +1145 +1146 return text
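This hunk renumbers the tokenizer's scanning methods and folds in a few behavioural changes: a public `peek()` helper, a check in `_scan_number` that peeks past a `.` and emits a VAR when the dot is followed by a letter instead of treating it as a decimal point, and a safe default for the `NUMERIC_LITERALS` lookup. As a minimal usage sketch (assuming a standard sqlglot install; not part of the patch), the scanning loop surfaces through `Tokenizer.tokenize`:

    # Minimal sketch, assuming sqlglot is importable; illustrative only.
    from sqlglot.tokens import Tokenizer, TokenType

    tokens = Tokenizer().tokenize("SELECT 1 /* answer */")

    for token in tokens:
        # Each Token carries the positional fields that _add() fills in.
        print(token.token_type, repr(token.text), token.line, token.col, token.start, token.end)

    # Per _scan_comment(), a trailing comment on the same line attaches to the
    # preceding token, so the block comment above lands on the NUMBER token.
    print(tokens[-1].token_type is TokenType.NUMBER)  # True
    print(tokens[-1].comments)                        # the comment text, delimiters stripped

The exact comment text depends on the configured delimiters; the attachment behaviour follows directly from the `_prev_token_line` check in `_scan_comment`.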
    @@ -2235,164 +2257,165 @@ 145 VARIANT = auto() 146 OBJECT = auto() 147 INET = auto() -148 -149 # keywords -150 ALIAS = auto() -151 ALTER = auto() -152 ALWAYS = auto() -153 ALL = auto() -154 ANTI = auto() -155 ANY = auto() -156 APPLY = auto() -157 ARRAY = auto() -158 ASC = auto() -159 ASOF = auto() -160 AUTO_INCREMENT = auto() -161 BEGIN = auto() -162 BETWEEN = auto() -163 CACHE = auto() -164 CASE = auto() -165 CHARACTER_SET = auto() -166 COLLATE = auto() -167 COMMAND = auto() -168 COMMENT = auto() -169 COMMIT = auto() -170 CONSTRAINT = auto() -171 CREATE = auto() -172 CROSS = auto() -173 CUBE = auto() -174 CURRENT_DATE = auto() -175 CURRENT_DATETIME = auto() -176 CURRENT_TIME = auto() -177 CURRENT_TIMESTAMP = auto() -178 CURRENT_USER = auto() -179 DEFAULT = auto() -180 DELETE = auto() -181 DESC = auto() -182 DESCRIBE = auto() -183 DICTIONARY = auto() -184 DISTINCT = auto() -185 DIV = auto() -186 DROP = auto() -187 ELSE = auto() -188 END = auto() -189 ESCAPE = auto() -190 EXCEPT = auto() -191 EXECUTE = auto() -192 EXISTS = auto() -193 FALSE = auto() -194 FETCH = auto() -195 FILTER = auto() -196 FINAL = auto() -197 FIRST = auto() -198 FOR = auto() -199 FOREIGN_KEY = auto() -200 FORMAT = auto() -201 FROM = auto() -202 FULL = auto() -203 FUNCTION = auto() -204 GLOB = auto() -205 GLOBAL = auto() -206 GROUP_BY = auto() -207 GROUPING_SETS = auto() -208 HAVING = auto() -209 HINT = auto() -210 IF = auto() -211 ILIKE = auto() -212 ILIKE_ANY = auto() -213 IN = auto() -214 INDEX = auto() -215 INNER = auto() -216 INSERT = auto() -217 INTERSECT = auto() -218 INTERVAL = auto() -219 INTO = auto() -220 INTRODUCER = auto() -221 IRLIKE = auto() -222 IS = auto() -223 ISNULL = auto() -224 JOIN = auto() -225 JOIN_MARKER = auto() -226 KEEP = auto() -227 LANGUAGE = auto() -228 LATERAL = auto() -229 LEFT = auto() -230 LIKE = auto() -231 LIKE_ANY = auto() -232 LIMIT = auto() -233 LOAD = auto() -234 LOCK = auto() -235 MAP = auto() -236 MATCH_RECOGNIZE = auto() -237 MERGE = auto() -238 MOD = auto() -239 NATURAL = auto() -240 NEXT = auto() -241 NEXT_VALUE_FOR = auto() -242 NOTNULL = auto() -243 NULL = auto() -244 OFFSET = auto() -245 ON = auto() -246 ORDER_BY = auto() -247 ORDERED = auto() -248 ORDINALITY = auto() -249 OUTER = auto() -250 OVER = auto() -251 OVERLAPS = auto() -252 OVERWRITE = auto() -253 PARTITION = auto() -254 PARTITION_BY = auto() -255 PERCENT = auto() -256 PIVOT = auto() -257 PLACEHOLDER = auto() -258 PRAGMA = auto() -259 PRIMARY_KEY = auto() -260 PROCEDURE = auto() -261 PROPERTIES = auto() -262 PSEUDO_TYPE = auto() -263 QUALIFY = auto() -264 QUOTE = auto() -265 RANGE = auto() -266 RECURSIVE = auto() -267 REPLACE = auto() -268 RETURNING = auto() -269 REFERENCES = auto() -270 RIGHT = auto() -271 RLIKE = auto() -272 ROLLBACK = auto() -273 ROLLUP = auto() -274 ROW = auto() -275 ROWS = auto() -276 SELECT = auto() -277 SEMI = auto() -278 SEPARATOR = auto() -279 SERDE_PROPERTIES = auto() -280 SET = auto() -281 SETTINGS = auto() -282 SHOW = auto() -283 SIMILAR_TO = auto() -284 SOME = auto() -285 STRUCT = auto() -286 TABLE_SAMPLE = auto() -287 TEMPORARY = auto() -288 TOP = auto() -289 THEN = auto() -290 TRUE = auto() -291 UNCACHE = auto() -292 UNION = auto() -293 UNNEST = auto() -294 UNPIVOT = auto() -295 UPDATE = auto() -296 USE = auto() -297 USING = auto() -298 VALUES = auto() -299 VIEW = auto() -300 VOLATILE = auto() -301 WHEN = auto() -302 WHERE = auto() -303 WINDOW = auto() -304 WITH = auto() -305 UNIQUE = auto() +148 ENUM = auto() +149 +150 # keywords +151 ALIAS = auto() +152 ALTER = auto() +153 
ALWAYS = auto() +154 ALL = auto() +155 ANTI = auto() +156 ANY = auto() +157 APPLY = auto() +158 ARRAY = auto() +159 ASC = auto() +160 ASOF = auto() +161 AUTO_INCREMENT = auto() +162 BEGIN = auto() +163 BETWEEN = auto() +164 CACHE = auto() +165 CASE = auto() +166 CHARACTER_SET = auto() +167 COLLATE = auto() +168 COMMAND = auto() +169 COMMENT = auto() +170 COMMIT = auto() +171 CONSTRAINT = auto() +172 CREATE = auto() +173 CROSS = auto() +174 CUBE = auto() +175 CURRENT_DATE = auto() +176 CURRENT_DATETIME = auto() +177 CURRENT_TIME = auto() +178 CURRENT_TIMESTAMP = auto() +179 CURRENT_USER = auto() +180 DEFAULT = auto() +181 DELETE = auto() +182 DESC = auto() +183 DESCRIBE = auto() +184 DICTIONARY = auto() +185 DISTINCT = auto() +186 DIV = auto() +187 DROP = auto() +188 ELSE = auto() +189 END = auto() +190 ESCAPE = auto() +191 EXCEPT = auto() +192 EXECUTE = auto() +193 EXISTS = auto() +194 FALSE = auto() +195 FETCH = auto() +196 FILTER = auto() +197 FINAL = auto() +198 FIRST = auto() +199 FOR = auto() +200 FOREIGN_KEY = auto() +201 FORMAT = auto() +202 FROM = auto() +203 FULL = auto() +204 FUNCTION = auto() +205 GLOB = auto() +206 GLOBAL = auto() +207 GROUP_BY = auto() +208 GROUPING_SETS = auto() +209 HAVING = auto() +210 HINT = auto() +211 IF = auto() +212 ILIKE = auto() +213 ILIKE_ANY = auto() +214 IN = auto() +215 INDEX = auto() +216 INNER = auto() +217 INSERT = auto() +218 INTERSECT = auto() +219 INTERVAL = auto() +220 INTO = auto() +221 INTRODUCER = auto() +222 IRLIKE = auto() +223 IS = auto() +224 ISNULL = auto() +225 JOIN = auto() +226 JOIN_MARKER = auto() +227 KEEP = auto() +228 LANGUAGE = auto() +229 LATERAL = auto() +230 LEFT = auto() +231 LIKE = auto() +232 LIKE_ANY = auto() +233 LIMIT = auto() +234 LOAD = auto() +235 LOCK = auto() +236 MAP = auto() +237 MATCH_RECOGNIZE = auto() +238 MERGE = auto() +239 MOD = auto() +240 NATURAL = auto() +241 NEXT = auto() +242 NEXT_VALUE_FOR = auto() +243 NOTNULL = auto() +244 NULL = auto() +245 OFFSET = auto() +246 ON = auto() +247 ORDER_BY = auto() +248 ORDERED = auto() +249 ORDINALITY = auto() +250 OUTER = auto() +251 OVER = auto() +252 OVERLAPS = auto() +253 OVERWRITE = auto() +254 PARTITION = auto() +255 PARTITION_BY = auto() +256 PERCENT = auto() +257 PIVOT = auto() +258 PLACEHOLDER = auto() +259 PRAGMA = auto() +260 PRIMARY_KEY = auto() +261 PROCEDURE = auto() +262 PROPERTIES = auto() +263 PSEUDO_TYPE = auto() +264 QUALIFY = auto() +265 QUOTE = auto() +266 RANGE = auto() +267 RECURSIVE = auto() +268 REPLACE = auto() +269 RETURNING = auto() +270 REFERENCES = auto() +271 RIGHT = auto() +272 RLIKE = auto() +273 ROLLBACK = auto() +274 ROLLUP = auto() +275 ROW = auto() +276 ROWS = auto() +277 SELECT = auto() +278 SEMI = auto() +279 SEPARATOR = auto() +280 SERDE_PROPERTIES = auto() +281 SET = auto() +282 SETTINGS = auto() +283 SHOW = auto() +284 SIMILAR_TO = auto() +285 SOME = auto() +286 STRUCT = auto() +287 TABLE_SAMPLE = auto() +288 TEMPORARY = auto() +289 TOP = auto() +290 THEN = auto() +291 TRUE = auto() +292 UNCACHE = auto() +293 UNION = auto() +294 UNNEST = auto() +295 UNPIVOT = auto() +296 UPDATE = auto() +297 USE = auto() +298 USING = auto() +299 VALUES = auto() +300 VIEW = auto() +301 VOLATILE = auto() +302 WHEN = auto() +303 WHERE = auto() +304 WINDOW = auto() +305 WITH = auto() +306 UNIQUE = auto() @@ -3971,6 +3994,18 @@ + +
    +
    + ENUM = +<TokenType.ENUM: 'ENUM'> + + +
    + + + +
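The hunk above adds `TokenType.ENUM` to the token-type enum (rendered in the docs as `<TokenType.ENUM: 'ENUM'>`, since `auto()` here reuses the member name as the value). A short hedged sketch of how such members are typically used, for example grouping related type keywords into a set for membership tests; `TYPE_TOKENS` is an illustrative name, not part of the patch:

    from sqlglot.tokens import TokenType

    TYPE_TOKENS = {TokenType.ENUM, TokenType.STRUCT, TokenType.MAP}

    def is_type_token(token_type: TokenType) -> bool:
        # Enum members compare by identity, so a set membership test suffices.
        return token_type in TYPE_TOKENS

    print(is_type_token(TokenType.ENUM))              # True
    print(is_type_token(TokenType.SELECT))            # False
    print(TokenType.ENUM.name, TokenType.ENUM.value)  # ENUM ENUM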
    @@ -5866,60 +5901,61 @@
    -
    308class Token:
    -309    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
    -310
    -311    @classmethod
    -312    def number(cls, number: int) -> Token:
    -313        """Returns a NUMBER token with `number` as its text."""
    -314        return cls(TokenType.NUMBER, str(number))
    -315
    -316    @classmethod
    -317    def string(cls, string: str) -> Token:
    -318        """Returns a STRING token with `string` as its text."""
    -319        return cls(TokenType.STRING, string)
    -320
    -321    @classmethod
    -322    def identifier(cls, identifier: str) -> Token:
    -323        """Returns an IDENTIFIER token with `identifier` as its text."""
    -324        return cls(TokenType.IDENTIFIER, identifier)
    -325
    -326    @classmethod
    -327    def var(cls, var: str) -> Token:
    -328        """Returns an VAR token with `var` as its text."""
    -329        return cls(TokenType.VAR, var)
    -330
    -331    def __init__(
    -332        self,
    -333        token_type: TokenType,
    -334        text: str,
    -335        line: int = 1,
    -336        col: int = 1,
    -337        start: int = 0,
    -338        end: int = 0,
    -339        comments: t.List[str] = [],
    -340    ) -> None:
    -341        """Token initializer.
    -342
    -343        Args:
    -344            token_type: The TokenType Enum.
    -345            text: The text of the token.
    -346            line: The line that the token ends on.
    -347            col: The column that the token ends on.
    -348            start: The start index of the token.
    -349            end: The ending index of the token.
    -350        """
    -351        self.token_type = token_type
    -352        self.text = text
    -353        self.line = line
    -354        self.col = col
    -355        self.start = start
    -356        self.end = end
    -357        self.comments = comments
    -358
    -359    def __repr__(self) -> str:
    -360        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
    -361        return f"<Token {attributes}>"
    +            
    309class Token:
    +310    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
    +311
    +312    @classmethod
    +313    def number(cls, number: int) -> Token:
    +314        """Returns a NUMBER token with `number` as its text."""
    +315        return cls(TokenType.NUMBER, str(number))
    +316
    +317    @classmethod
    +318    def string(cls, string: str) -> Token:
    +319        """Returns a STRING token with `string` as its text."""
    +320        return cls(TokenType.STRING, string)
    +321
    +322    @classmethod
    +323    def identifier(cls, identifier: str) -> Token:
    +324        """Returns an IDENTIFIER token with `identifier` as its text."""
    +325        return cls(TokenType.IDENTIFIER, identifier)
    +326
    +327    @classmethod
    +328    def var(cls, var: str) -> Token:
    +329        """Returns a VAR token with `var` as its text."""
    +330        return cls(TokenType.VAR, var)
    +331
    +332    def __init__(
    +333        self,
    +334        token_type: TokenType,
    +335        text: str,
    +336        line: int = 1,
    +337        col: int = 1,
    +338        start: int = 0,
    +339        end: int = 0,
    +340        comments: t.List[str] = [],
    +341    ) -> None:
    +342        """Token initializer.
    +343
    +344        Args:
    +345            token_type: The TokenType Enum.
    +346            text: The text of the token.
    +347            line: The line that the token ends on.
    +348            col: The column that the token ends on.
    +349            start: The start index of the token.
    +350            end: The ending index of the token.
    +351            comments: The comments to attach to the token.
    +352        """
    +353        self.token_type = token_type
    +354        self.text = text
    +355        self.line = line
    +356        self.col = col
    +357        self.start = start
    +358        self.end = end
    +359        self.comments = comments
    +360
    +361    def __repr__(self) -> str:
    +362        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
    +363        return f"<Token {attributes}>"
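Besides the renumbering, the `Token` docstring above now documents the `comments` argument. For reference, a hedged sketch of the classmethod constructors defined on the class; they only pre-fill `token_type` and `text`, so the positional fields keep their defaults when a token is built by hand rather than by the tokenizer:

    # Illustrative sketch, assuming a standard sqlglot install; not part of the patch.
    from sqlglot.tokens import Token, TokenType

    num = Token.number(42)             # equivalent to Token(TokenType.NUMBER, "42")
    ident = Token.identifier("users")  # equivalent to Token(TokenType.IDENTIFIER, "users")

    print(ident.token_type is TokenType.IDENTIFIER)  # True
    print(num.line, num.col, num.start, num.end)     # 1 1 0 0 (the declared defaults)
    print(num)                                       # <Token token_type: TokenType.NUMBER, text: 42, ...>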
     
    @@ -5935,33 +5971,34 @@
    -
    331    def __init__(
    -332        self,
    -333        token_type: TokenType,
    -334        text: str,
    -335        line: int = 1,
    -336        col: int = 1,
    -337        start: int = 0,
    -338        end: int = 0,
    -339        comments: t.List[str] = [],
    -340    ) -> None:
    -341        """Token initializer.
    -342
    -343        Args:
    -344            token_type: The TokenType Enum.
    -345            text: The text of the token.
    -346            line: The line that the token ends on.
    -347            col: The column that the token ends on.
    -348            start: The start index of the token.
    -349            end: The ending index of the token.
    -350        """
    -351        self.token_type = token_type
    -352        self.text = text
    -353        self.line = line
    -354        self.col = col
    -355        self.start = start
    -356        self.end = end
    -357        self.comments = comments
    +            
    332    def __init__(
    +333        self,
    +334        token_type: TokenType,
    +335        text: str,
    +336        line: int = 1,
    +337        col: int = 1,
    +338        start: int = 0,
    +339        end: int = 0,
    +340        comments: t.List[str] = [],
    +341    ) -> None:
    +342        """Token initializer.
    +343
    +344        Args:
    +345            token_type: The TokenType Enum.
    +346            text: The text of the token.
    +347            line: The line that the token ends on.
    +348            col: The column that the token ends on.
    +349            start: The start index of the token.
    +350            end: The ending index of the token.
    +351            comments: The comments to attach to the token.
    +352        """
    +353        self.token_type = token_type
    +354        self.text = text
    +355        self.line = line
    +356        self.col = col
    +357        self.start = start
    +358        self.end = end
    +359        self.comments = comments
     
    @@ -5976,6 +6013,7 @@
  • col: The column that the token ends on.
  • start: The start index of the token.
  • end: The ending index of the token.
  • + comments: The comments to attach to the token.
  • @@ -5993,10 +6031,10 @@
    -
    311    @classmethod
    -312    def number(cls, number: int) -> Token:
    -313        """Returns a NUMBER token with `number` as its text."""
    -314        return cls(TokenType.NUMBER, str(number))
    +            
    312    @classmethod
    +313    def number(cls, number: int) -> Token:
    +314        """Returns a NUMBER token with `number` as its text."""
    +315        return cls(TokenType.NUMBER, str(number))
     
    @@ -6017,10 +6055,10 @@
    -
    316    @classmethod
    -317    def string(cls, string: str) -> Token:
    -318        """Returns a STRING token with `string` as its text."""
    -319        return cls(TokenType.STRING, string)
    +            
    317    @classmethod
    +318    def string(cls, string: str) -> Token:
    +319        """Returns a STRING token with `string` as its text."""
    +320        return cls(TokenType.STRING, string)
     
    @@ -6041,10 +6079,10 @@
    -
    321    @classmethod
    -322    def identifier(cls, identifier: str) -> Token:
    -323        """Returns an IDENTIFIER token with `identifier` as its text."""
    -324        return cls(TokenType.IDENTIFIER, identifier)
    +            
    322    @classmethod
    +323    def identifier(cls, identifier: str) -> Token:
    +324        """Returns an IDENTIFIER token with `identifier` as its text."""
    +325        return cls(TokenType.IDENTIFIER, identifier)
     
    @@ -6065,10 +6103,10 @@
    -
    326    @classmethod
    -327    def var(cls, var: str) -> Token:
    -328        """Returns an VAR token with `var` as its text."""
    -329        return cls(TokenType.VAR, var)
    +            
    327    @classmethod
    +328    def var(cls, var: str) -> Token:
    +329        """Returns a VAR token with `var` as its text."""
    +330        return cls(TokenType.VAR, var)
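The following hunk turns to the `Tokenizer` class itself: `IDENTIFIERS_CAN_START_WITH_DIGIT` and `_KEYWORD_TRIE` are now declared as autofilled class attributes, the Jinja block markers in `KEYWORDS` are built with comprehensions, and `COMMENTS` drops the `("{#", "#}")` pair. Dialect tokenizers customize scanning by subclassing and overriding these class-level tables, which the `_Tokenizer` metaclass compiles into the underscore-prefixed lookups used by the scan methods. A hedged sketch of that pattern; the back-quoted identifiers and the `MINUS` mapping are purely illustrative, not part of this patch:

    from sqlglot.tokens import Tokenizer, TokenType

    class MyTokenizer(Tokenizer):
        # Accept back-quoted identifiers in addition to the default double quotes.
        IDENTIFIERS = ['"', "`"]
        # Merge on top of the defaults so the base keywords remain available.
        KEYWORDS = {
            **Tokenizer.KEYWORDS,
            "MINUS": TokenType.EXCEPT,  # illustrative mapping only
        }

    tokens = MyTokenizer().tokenize("SELECT `a` FROM t MINUS SELECT `a` FROM s")
    print([token.token_type for token in tokens])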
     
    @@ -6089,724 +6127,735 @@
    -
     414class Tokenizer(metaclass=_Tokenizer):
    - 415    SINGLE_TOKENS = {
    - 416        "(": TokenType.L_PAREN,
    - 417        ")": TokenType.R_PAREN,
    - 418        "[": TokenType.L_BRACKET,
    - 419        "]": TokenType.R_BRACKET,
    - 420        "{": TokenType.L_BRACE,
    - 421        "}": TokenType.R_BRACE,
    - 422        "&": TokenType.AMP,
    - 423        "^": TokenType.CARET,
    - 424        ":": TokenType.COLON,
    - 425        ",": TokenType.COMMA,
    - 426        ".": TokenType.DOT,
    - 427        "-": TokenType.DASH,
    - 428        "=": TokenType.EQ,
    - 429        ">": TokenType.GT,
    - 430        "<": TokenType.LT,
    - 431        "%": TokenType.MOD,
    - 432        "!": TokenType.NOT,
    - 433        "|": TokenType.PIPE,
    - 434        "+": TokenType.PLUS,
    - 435        ";": TokenType.SEMICOLON,
    - 436        "/": TokenType.SLASH,
    - 437        "\\": TokenType.BACKSLASH,
    - 438        "*": TokenType.STAR,
    - 439        "~": TokenType.TILDA,
    - 440        "?": TokenType.PLACEHOLDER,
    - 441        "@": TokenType.PARAMETER,
    - 442        # used for breaking a var like x'y' but nothing else
    - 443        # the token type doesn't matter
    - 444        "'": TokenType.QUOTE,
    - 445        "`": TokenType.IDENTIFIER,
    - 446        '"': TokenType.IDENTIFIER,
    - 447        "#": TokenType.HASH,
    - 448    }
    - 449
    - 450    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
    - 451    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    - 452    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
    - 453    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
    - 454    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
    - 455    IDENTIFIER_ESCAPES = ['"']
    - 456    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
    - 457    STRING_ESCAPES = ["'"]
    - 458    VAR_SINGLE_TOKENS: t.Set[str] = set()
    - 459
    - 460    _COMMENTS: t.Dict[str, str] = {}
    - 461    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
    - 462    _IDENTIFIERS: t.Dict[str, str] = {}
    - 463    _IDENTIFIER_ESCAPES: t.Set[str] = set()
    - 464    _QUOTES: t.Dict[str, str] = {}
    - 465    _STRING_ESCAPES: t.Set[str] = set()
    - 466
    - 467    KEYWORDS: t.Dict[t.Optional[str], TokenType] = {
    - 468        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
    - 469        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
    - 470        "{{+": TokenType.BLOCK_START,
    - 471        "{{-": TokenType.BLOCK_START,
    - 472        "+}}": TokenType.BLOCK_END,
    - 473        "-}}": TokenType.BLOCK_END,
    - 474        "/*+": TokenType.HINT,
    - 475        "==": TokenType.EQ,
    - 476        "::": TokenType.DCOLON,
    - 477        "||": TokenType.DPIPE,
    - 478        ">=": TokenType.GTE,
    - 479        "<=": TokenType.LTE,
    - 480        "<>": TokenType.NEQ,
    - 481        "!=": TokenType.NEQ,
    - 482        "<=>": TokenType.NULLSAFE_EQ,
    - 483        "->": TokenType.ARROW,
    - 484        "->>": TokenType.DARROW,
    - 485        "=>": TokenType.FARROW,
    - 486        "#>": TokenType.HASH_ARROW,
    - 487        "#>>": TokenType.DHASH_ARROW,
    - 488        "<->": TokenType.LR_ARROW,
    - 489        "&&": TokenType.DAMP,
    - 490        "ALL": TokenType.ALL,
    - 491        "ALWAYS": TokenType.ALWAYS,
    - 492        "AND": TokenType.AND,
    - 493        "ANTI": TokenType.ANTI,
    - 494        "ANY": TokenType.ANY,
    - 495        "ASC": TokenType.ASC,
    - 496        "AS": TokenType.ALIAS,
    - 497        "ASOF": TokenType.ASOF,
    - 498        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
    - 499        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
    - 500        "BEGIN": TokenType.BEGIN,
    - 501        "BETWEEN": TokenType.BETWEEN,
    - 502        "CACHE": TokenType.CACHE,
    - 503        "UNCACHE": TokenType.UNCACHE,
    - 504        "CASE": TokenType.CASE,
    - 505        "CHARACTER SET": TokenType.CHARACTER_SET,
    - 506        "COLLATE": TokenType.COLLATE,
    - 507        "COLUMN": TokenType.COLUMN,
    - 508        "COMMIT": TokenType.COMMIT,
    - 509        "CONSTRAINT": TokenType.CONSTRAINT,
    - 510        "CREATE": TokenType.CREATE,
    - 511        "CROSS": TokenType.CROSS,
    - 512        "CUBE": TokenType.CUBE,
    - 513        "CURRENT_DATE": TokenType.CURRENT_DATE,
    - 514        "CURRENT_TIME": TokenType.CURRENT_TIME,
    - 515        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
    - 516        "CURRENT_USER": TokenType.CURRENT_USER,
    - 517        "DATABASE": TokenType.DATABASE,
    - 518        "DEFAULT": TokenType.DEFAULT,
    - 519        "DELETE": TokenType.DELETE,
    - 520        "DESC": TokenType.DESC,
    - 521        "DESCRIBE": TokenType.DESCRIBE,
    - 522        "DISTINCT": TokenType.DISTINCT,
    - 523        "DIV": TokenType.DIV,
    - 524        "DROP": TokenType.DROP,
    - 525        "ELSE": TokenType.ELSE,
    - 526        "END": TokenType.END,
    - 527        "ESCAPE": TokenType.ESCAPE,
    - 528        "EXCEPT": TokenType.EXCEPT,
    - 529        "EXECUTE": TokenType.EXECUTE,
    - 530        "EXISTS": TokenType.EXISTS,
    - 531        "FALSE": TokenType.FALSE,
    - 532        "FETCH": TokenType.FETCH,
    - 533        "FILTER": TokenType.FILTER,
    - 534        "FIRST": TokenType.FIRST,
    - 535        "FULL": TokenType.FULL,
    - 536        "FUNCTION": TokenType.FUNCTION,
    - 537        "FOR": TokenType.FOR,
    - 538        "FOREIGN KEY": TokenType.FOREIGN_KEY,
    - 539        "FORMAT": TokenType.FORMAT,
    - 540        "FROM": TokenType.FROM,
    - 541        "GEOGRAPHY": TokenType.GEOGRAPHY,
    - 542        "GEOMETRY": TokenType.GEOMETRY,
    - 543        "GLOB": TokenType.GLOB,
    - 544        "GROUP BY": TokenType.GROUP_BY,
    - 545        "GROUPING SETS": TokenType.GROUPING_SETS,
    - 546        "HAVING": TokenType.HAVING,
    - 547        "IF": TokenType.IF,
    - 548        "ILIKE": TokenType.ILIKE,
    - 549        "IN": TokenType.IN,
    - 550        "INDEX": TokenType.INDEX,
    - 551        "INET": TokenType.INET,
    - 552        "INNER": TokenType.INNER,
    - 553        "INSERT": TokenType.INSERT,
    - 554        "INTERVAL": TokenType.INTERVAL,
    - 555        "INTERSECT": TokenType.INTERSECT,
    - 556        "INTO": TokenType.INTO,
    - 557        "IS": TokenType.IS,
    - 558        "ISNULL": TokenType.ISNULL,
    - 559        "JOIN": TokenType.JOIN,
    - 560        "KEEP": TokenType.KEEP,
    - 561        "LATERAL": TokenType.LATERAL,
    - 562        "LEFT": TokenType.LEFT,
    - 563        "LIKE": TokenType.LIKE,
    - 564        "LIMIT": TokenType.LIMIT,
    - 565        "LOAD": TokenType.LOAD,
    - 566        "LOCK": TokenType.LOCK,
    - 567        "MERGE": TokenType.MERGE,
    - 568        "NATURAL": TokenType.NATURAL,
    - 569        "NEXT": TokenType.NEXT,
    - 570        "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR,
    - 571        "NOT": TokenType.NOT,
    - 572        "NOTNULL": TokenType.NOTNULL,
    - 573        "NULL": TokenType.NULL,
    - 574        "OBJECT": TokenType.OBJECT,
    - 575        "OFFSET": TokenType.OFFSET,
    - 576        "ON": TokenType.ON,
    - 577        "OR": TokenType.OR,
    - 578        "ORDER BY": TokenType.ORDER_BY,
    - 579        "ORDINALITY": TokenType.ORDINALITY,
    - 580        "OUTER": TokenType.OUTER,
    - 581        "OVER": TokenType.OVER,
    - 582        "OVERLAPS": TokenType.OVERLAPS,
    - 583        "OVERWRITE": TokenType.OVERWRITE,
    - 584        "PARTITION": TokenType.PARTITION,
    - 585        "PARTITION BY": TokenType.PARTITION_BY,
    - 586        "PARTITIONED BY": TokenType.PARTITION_BY,
    - 587        "PARTITIONED_BY": TokenType.PARTITION_BY,
    - 588        "PERCENT": TokenType.PERCENT,
    - 589        "PIVOT": TokenType.PIVOT,
    - 590        "PRAGMA": TokenType.PRAGMA,
    - 591        "PRIMARY KEY": TokenType.PRIMARY_KEY,
    - 592        "PROCEDURE": TokenType.PROCEDURE,
    - 593        "QUALIFY": TokenType.QUALIFY,
    - 594        "RANGE": TokenType.RANGE,
    - 595        "RECURSIVE": TokenType.RECURSIVE,
    - 596        "REGEXP": TokenType.RLIKE,
    - 597        "REPLACE": TokenType.REPLACE,
    - 598        "REFERENCES": TokenType.REFERENCES,
    - 599        "RIGHT": TokenType.RIGHT,
    - 600        "RLIKE": TokenType.RLIKE,
    - 601        "ROLLBACK": TokenType.ROLLBACK,
    - 602        "ROLLUP": TokenType.ROLLUP,
    - 603        "ROW": TokenType.ROW,
    - 604        "ROWS": TokenType.ROWS,
    - 605        "SCHEMA": TokenType.SCHEMA,
    - 606        "SELECT": TokenType.SELECT,
    - 607        "SEMI": TokenType.SEMI,
    - 608        "SET": TokenType.SET,
    - 609        "SETTINGS": TokenType.SETTINGS,
    - 610        "SHOW": TokenType.SHOW,
    - 611        "SIMILAR TO": TokenType.SIMILAR_TO,
    - 612        "SOME": TokenType.SOME,
    - 613        "TABLE": TokenType.TABLE,
    - 614        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
    - 615        "TEMP": TokenType.TEMPORARY,
    - 616        "TEMPORARY": TokenType.TEMPORARY,
    - 617        "THEN": TokenType.THEN,
    - 618        "TRUE": TokenType.TRUE,
    - 619        "UNION": TokenType.UNION,
    - 620        "UNNEST": TokenType.UNNEST,
    - 621        "UNPIVOT": TokenType.UNPIVOT,
    - 622        "UPDATE": TokenType.UPDATE,
    - 623        "USE": TokenType.USE,
    - 624        "USING": TokenType.USING,
    - 625        "UUID": TokenType.UUID,
    - 626        "VALUES": TokenType.VALUES,
    - 627        "VIEW": TokenType.VIEW,
    - 628        "VOLATILE": TokenType.VOLATILE,
    - 629        "WHEN": TokenType.WHEN,
    - 630        "WHERE": TokenType.WHERE,
    - 631        "WINDOW": TokenType.WINDOW,
    - 632        "WITH": TokenType.WITH,
    - 633        "APPLY": TokenType.APPLY,
    - 634        "ARRAY": TokenType.ARRAY,
    - 635        "BIT": TokenType.BIT,
    - 636        "BOOL": TokenType.BOOLEAN,
    - 637        "BOOLEAN": TokenType.BOOLEAN,
    - 638        "BYTE": TokenType.TINYINT,
    - 639        "TINYINT": TokenType.TINYINT,
    - 640        "SHORT": TokenType.SMALLINT,
    - 641        "SMALLINT": TokenType.SMALLINT,
    - 642        "INT2": TokenType.SMALLINT,
    - 643        "INTEGER": TokenType.INT,
    - 644        "INT": TokenType.INT,
    - 645        "INT4": TokenType.INT,
    - 646        "LONG": TokenType.BIGINT,
    - 647        "BIGINT": TokenType.BIGINT,
    - 648        "INT8": TokenType.BIGINT,
    - 649        "DEC": TokenType.DECIMAL,
    - 650        "DECIMAL": TokenType.DECIMAL,
    - 651        "BIGDECIMAL": TokenType.BIGDECIMAL,
    - 652        "BIGNUMERIC": TokenType.BIGDECIMAL,
    - 653        "MAP": TokenType.MAP,
    - 654        "NULLABLE": TokenType.NULLABLE,
    - 655        "NUMBER": TokenType.DECIMAL,
    - 656        "NUMERIC": TokenType.DECIMAL,
    - 657        "FIXED": TokenType.DECIMAL,
    - 658        "REAL": TokenType.FLOAT,
    - 659        "FLOAT": TokenType.FLOAT,
    - 660        "FLOAT4": TokenType.FLOAT,
    - 661        "FLOAT8": TokenType.DOUBLE,
    - 662        "DOUBLE": TokenType.DOUBLE,
    - 663        "DOUBLE PRECISION": TokenType.DOUBLE,
    - 664        "JSON": TokenType.JSON,
    - 665        "CHAR": TokenType.CHAR,
    - 666        "CHARACTER": TokenType.CHAR,
    - 667        "NCHAR": TokenType.NCHAR,
    - 668        "VARCHAR": TokenType.VARCHAR,
    - 669        "VARCHAR2": TokenType.VARCHAR,
    - 670        "NVARCHAR": TokenType.NVARCHAR,
    - 671        "NVARCHAR2": TokenType.NVARCHAR,
    - 672        "STR": TokenType.TEXT,
    - 673        "STRING": TokenType.TEXT,
    - 674        "TEXT": TokenType.TEXT,
    - 675        "CLOB": TokenType.TEXT,
    - 676        "LONGVARCHAR": TokenType.TEXT,
    - 677        "BINARY": TokenType.BINARY,
    - 678        "BLOB": TokenType.VARBINARY,
    - 679        "BYTEA": TokenType.VARBINARY,
    - 680        "VARBINARY": TokenType.VARBINARY,
    - 681        "TIME": TokenType.TIME,
    - 682        "TIMESTAMP": TokenType.TIMESTAMP,
    - 683        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
    - 684        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
    - 685        "DATE": TokenType.DATE,
    - 686        "DATETIME": TokenType.DATETIME,
    - 687        "INT4RANGE": TokenType.INT4RANGE,
    - 688        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
    - 689        "INT8RANGE": TokenType.INT8RANGE,
    - 690        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
    - 691        "NUMRANGE": TokenType.NUMRANGE,
    - 692        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
    - 693        "TSRANGE": TokenType.TSRANGE,
    - 694        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
    - 695        "TSTZRANGE": TokenType.TSTZRANGE,
    - 696        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
    - 697        "DATERANGE": TokenType.DATERANGE,
    - 698        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
    - 699        "UNIQUE": TokenType.UNIQUE,
    - 700        "STRUCT": TokenType.STRUCT,
    - 701        "VARIANT": TokenType.VARIANT,
    - 702        "ALTER": TokenType.ALTER,
    - 703        "ANALYZE": TokenType.COMMAND,
    - 704        "CALL": TokenType.COMMAND,
    - 705        "COMMENT": TokenType.COMMENT,
    - 706        "COPY": TokenType.COMMAND,
    - 707        "EXPLAIN": TokenType.COMMAND,
    - 708        "GRANT": TokenType.COMMAND,
    - 709        "OPTIMIZE": TokenType.COMMAND,
    - 710        "PREPARE": TokenType.COMMAND,
    - 711        "TRUNCATE": TokenType.COMMAND,
    - 712        "VACUUM": TokenType.COMMAND,
    - 713    }
    - 714
    - 715    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
    - 716        " ": TokenType.SPACE,
    - 717        "\t": TokenType.SPACE,
    - 718        "\n": TokenType.BREAK,
    - 719        "\r": TokenType.BREAK,
    - 720        "\r\n": TokenType.BREAK,
    +            
     419class Tokenizer(metaclass=_Tokenizer):
    + 420    SINGLE_TOKENS = {
    + 421        "(": TokenType.L_PAREN,
    + 422        ")": TokenType.R_PAREN,
    + 423        "[": TokenType.L_BRACKET,
    + 424        "]": TokenType.R_BRACKET,
    + 425        "{": TokenType.L_BRACE,
    + 426        "}": TokenType.R_BRACE,
    + 427        "&": TokenType.AMP,
    + 428        "^": TokenType.CARET,
    + 429        ":": TokenType.COLON,
    + 430        ",": TokenType.COMMA,
    + 431        ".": TokenType.DOT,
    + 432        "-": TokenType.DASH,
    + 433        "=": TokenType.EQ,
    + 434        ">": TokenType.GT,
    + 435        "<": TokenType.LT,
    + 436        "%": TokenType.MOD,
    + 437        "!": TokenType.NOT,
    + 438        "|": TokenType.PIPE,
    + 439        "+": TokenType.PLUS,
    + 440        ";": TokenType.SEMICOLON,
    + 441        "/": TokenType.SLASH,
    + 442        "\\": TokenType.BACKSLASH,
    + 443        "*": TokenType.STAR,
    + 444        "~": TokenType.TILDA,
    + 445        "?": TokenType.PLACEHOLDER,
    + 446        "@": TokenType.PARAMETER,
    + 447        # used for breaking a var like x'y' but nothing else
    + 448        # the token type doesn't matter
    + 449        "'": TokenType.QUOTE,
    + 450        "`": TokenType.IDENTIFIER,
    + 451        '"': TokenType.IDENTIFIER,
    + 452        "#": TokenType.HASH,
    + 453    }
    + 454
    + 455    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
    + 456    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    + 457    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
    + 458    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
    + 459    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
    + 460    IDENTIFIER_ESCAPES = ['"']
    + 461    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
    + 462    STRING_ESCAPES = ["'"]
    + 463    VAR_SINGLE_TOKENS: t.Set[str] = set()
    + 464
    + 465    # Autofilled
    + 466    IDENTIFIERS_CAN_START_WITH_DIGIT: bool = False
    + 467
    + 468    _COMMENTS: t.Dict[str, str] = {}
    + 469    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
    + 470    _IDENTIFIERS: t.Dict[str, str] = {}
    + 471    _IDENTIFIER_ESCAPES: t.Set[str] = set()
    + 472    _QUOTES: t.Dict[str, str] = {}
    + 473    _STRING_ESCAPES: t.Set[str] = set()
    + 474    _KEYWORD_TRIE: t.Dict = {}
    + 475
    + 476    KEYWORDS: t.Dict[str, TokenType] = {
    + 477        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
    + 478        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
    + 479        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
    + 480        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
    + 481        "/*+": TokenType.HINT,
    + 482        "==": TokenType.EQ,
    + 483        "::": TokenType.DCOLON,
    + 484        "||": TokenType.DPIPE,
    + 485        ">=": TokenType.GTE,
    + 486        "<=": TokenType.LTE,
    + 487        "<>": TokenType.NEQ,
    + 488        "!=": TokenType.NEQ,
    + 489        "<=>": TokenType.NULLSAFE_EQ,
    + 490        "->": TokenType.ARROW,
    + 491        "->>": TokenType.DARROW,
    + 492        "=>": TokenType.FARROW,
    + 493        "#>": TokenType.HASH_ARROW,
    + 494        "#>>": TokenType.DHASH_ARROW,
    + 495        "<->": TokenType.LR_ARROW,
    + 496        "&&": TokenType.DAMP,
    + 497        "ALL": TokenType.ALL,
    + 498        "ALWAYS": TokenType.ALWAYS,
    + 499        "AND": TokenType.AND,
    + 500        "ANTI": TokenType.ANTI,
    + 501        "ANY": TokenType.ANY,
    + 502        "ASC": TokenType.ASC,
    + 503        "AS": TokenType.ALIAS,
    + 504        "ASOF": TokenType.ASOF,
    + 505        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
    + 506        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
    + 507        "BEGIN": TokenType.BEGIN,
    + 508        "BETWEEN": TokenType.BETWEEN,
    + 509        "CACHE": TokenType.CACHE,
    + 510        "UNCACHE": TokenType.UNCACHE,
    + 511        "CASE": TokenType.CASE,
    + 512        "CHARACTER SET": TokenType.CHARACTER_SET,
    + 513        "COLLATE": TokenType.COLLATE,
    + 514        "COLUMN": TokenType.COLUMN,
    + 515        "COMMIT": TokenType.COMMIT,
    + 516        "CONSTRAINT": TokenType.CONSTRAINT,
    + 517        "CREATE": TokenType.CREATE,
    + 518        "CROSS": TokenType.CROSS,
    + 519        "CUBE": TokenType.CUBE,
    + 520        "CURRENT_DATE": TokenType.CURRENT_DATE,
    + 521        "CURRENT_TIME": TokenType.CURRENT_TIME,
    + 522        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
    + 523        "CURRENT_USER": TokenType.CURRENT_USER,
    + 524        "DATABASE": TokenType.DATABASE,
    + 525        "DEFAULT": TokenType.DEFAULT,
    + 526        "DELETE": TokenType.DELETE,
    + 527        "DESC": TokenType.DESC,
    + 528        "DESCRIBE": TokenType.DESCRIBE,
    + 529        "DISTINCT": TokenType.DISTINCT,
    + 530        "DIV": TokenType.DIV,
    + 531        "DROP": TokenType.DROP,
    + 532        "ELSE": TokenType.ELSE,
    + 533        "END": TokenType.END,
    + 534        "ESCAPE": TokenType.ESCAPE,
    + 535        "EXCEPT": TokenType.EXCEPT,
    + 536        "EXECUTE": TokenType.EXECUTE,
    + 537        "EXISTS": TokenType.EXISTS,
    + 538        "FALSE": TokenType.FALSE,
    + 539        "FETCH": TokenType.FETCH,
    + 540        "FILTER": TokenType.FILTER,
    + 541        "FIRST": TokenType.FIRST,
    + 542        "FULL": TokenType.FULL,
    + 543        "FUNCTION": TokenType.FUNCTION,
    + 544        "FOR": TokenType.FOR,
    + 545        "FOREIGN KEY": TokenType.FOREIGN_KEY,
    + 546        "FORMAT": TokenType.FORMAT,
    + 547        "FROM": TokenType.FROM,
    + 548        "GEOGRAPHY": TokenType.GEOGRAPHY,
    + 549        "GEOMETRY": TokenType.GEOMETRY,
    + 550        "GLOB": TokenType.GLOB,
    + 551        "GROUP BY": TokenType.GROUP_BY,
    + 552        "GROUPING SETS": TokenType.GROUPING_SETS,
    + 553        "HAVING": TokenType.HAVING,
    + 554        "IF": TokenType.IF,
    + 555        "ILIKE": TokenType.ILIKE,
    + 556        "IN": TokenType.IN,
    + 557        "INDEX": TokenType.INDEX,
    + 558        "INET": TokenType.INET,
    + 559        "INNER": TokenType.INNER,
    + 560        "INSERT": TokenType.INSERT,
    + 561        "INTERVAL": TokenType.INTERVAL,
    + 562        "INTERSECT": TokenType.INTERSECT,
    + 563        "INTO": TokenType.INTO,
    + 564        "IS": TokenType.IS,
    + 565        "ISNULL": TokenType.ISNULL,
    + 566        "JOIN": TokenType.JOIN,
    + 567        "KEEP": TokenType.KEEP,
    + 568        "LATERAL": TokenType.LATERAL,
    + 569        "LEFT": TokenType.LEFT,
    + 570        "LIKE": TokenType.LIKE,
    + 571        "LIMIT": TokenType.LIMIT,
    + 572        "LOAD": TokenType.LOAD,
    + 573        "LOCK": TokenType.LOCK,
    + 574        "MERGE": TokenType.MERGE,
    + 575        "NATURAL": TokenType.NATURAL,
    + 576        "NEXT": TokenType.NEXT,
    + 577        "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR,
    + 578        "NOT": TokenType.NOT,
    + 579        "NOTNULL": TokenType.NOTNULL,
    + 580        "NULL": TokenType.NULL,
    + 581        "OBJECT": TokenType.OBJECT,
    + 582        "OFFSET": TokenType.OFFSET,
    + 583        "ON": TokenType.ON,
    + 584        "OR": TokenType.OR,
    + 585        "ORDER BY": TokenType.ORDER_BY,
    + 586        "ORDINALITY": TokenType.ORDINALITY,
    + 587        "OUTER": TokenType.OUTER,
    + 588        "OVER": TokenType.OVER,
    + 589        "OVERLAPS": TokenType.OVERLAPS,
    + 590        "OVERWRITE": TokenType.OVERWRITE,
    + 591        "PARTITION": TokenType.PARTITION,
    + 592        "PARTITION BY": TokenType.PARTITION_BY,
    + 593        "PARTITIONED BY": TokenType.PARTITION_BY,
    + 594        "PARTITIONED_BY": TokenType.PARTITION_BY,
    + 595        "PERCENT": TokenType.PERCENT,
    + 596        "PIVOT": TokenType.PIVOT,
    + 597        "PRAGMA": TokenType.PRAGMA,
    + 598        "PRIMARY KEY": TokenType.PRIMARY_KEY,
    + 599        "PROCEDURE": TokenType.PROCEDURE,
    + 600        "QUALIFY": TokenType.QUALIFY,
    + 601        "RANGE": TokenType.RANGE,
    + 602        "RECURSIVE": TokenType.RECURSIVE,
    + 603        "REGEXP": TokenType.RLIKE,
    + 604        "REPLACE": TokenType.REPLACE,
    + 605        "RETURNING": TokenType.RETURNING,
    + 606        "REFERENCES": TokenType.REFERENCES,
    + 607        "RIGHT": TokenType.RIGHT,
    + 608        "RLIKE": TokenType.RLIKE,
    + 609        "ROLLBACK": TokenType.ROLLBACK,
    + 610        "ROLLUP": TokenType.ROLLUP,
    + 611        "ROW": TokenType.ROW,
    + 612        "ROWS": TokenType.ROWS,
    + 613        "SCHEMA": TokenType.SCHEMA,
    + 614        "SELECT": TokenType.SELECT,
    + 615        "SEMI": TokenType.SEMI,
    + 616        "SET": TokenType.SET,
    + 617        "SETTINGS": TokenType.SETTINGS,
    + 618        "SHOW": TokenType.SHOW,
    + 619        "SIMILAR TO": TokenType.SIMILAR_TO,
    + 620        "SOME": TokenType.SOME,
    + 621        "TABLE": TokenType.TABLE,
    + 622        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
    + 623        "TEMP": TokenType.TEMPORARY,
    + 624        "TEMPORARY": TokenType.TEMPORARY,
    + 625        "THEN": TokenType.THEN,
    + 626        "TRUE": TokenType.TRUE,
    + 627        "UNION": TokenType.UNION,
    + 628        "UNNEST": TokenType.UNNEST,
    + 629        "UNPIVOT": TokenType.UNPIVOT,
    + 630        "UPDATE": TokenType.UPDATE,
    + 631        "USE": TokenType.USE,
    + 632        "USING": TokenType.USING,
    + 633        "UUID": TokenType.UUID,
    + 634        "VALUES": TokenType.VALUES,
    + 635        "VIEW": TokenType.VIEW,
    + 636        "VOLATILE": TokenType.VOLATILE,
    + 637        "WHEN": TokenType.WHEN,
    + 638        "WHERE": TokenType.WHERE,
    + 639        "WINDOW": TokenType.WINDOW,
    + 640        "WITH": TokenType.WITH,
    + 641        "APPLY": TokenType.APPLY,
    + 642        "ARRAY": TokenType.ARRAY,
    + 643        "BIT": TokenType.BIT,
    + 644        "BOOL": TokenType.BOOLEAN,
    + 645        "BOOLEAN": TokenType.BOOLEAN,
    + 646        "BYTE": TokenType.TINYINT,
    + 647        "TINYINT": TokenType.TINYINT,
    + 648        "SHORT": TokenType.SMALLINT,
    + 649        "SMALLINT": TokenType.SMALLINT,
    + 650        "INT2": TokenType.SMALLINT,
    + 651        "INTEGER": TokenType.INT,
    + 652        "INT": TokenType.INT,
    + 653        "INT4": TokenType.INT,
    + 654        "LONG": TokenType.BIGINT,
    + 655        "BIGINT": TokenType.BIGINT,
    + 656        "INT8": TokenType.BIGINT,
    + 657        "DEC": TokenType.DECIMAL,
    + 658        "DECIMAL": TokenType.DECIMAL,
    + 659        "BIGDECIMAL": TokenType.BIGDECIMAL,
    + 660        "BIGNUMERIC": TokenType.BIGDECIMAL,
    + 661        "MAP": TokenType.MAP,
    + 662        "NULLABLE": TokenType.NULLABLE,
    + 663        "NUMBER": TokenType.DECIMAL,
    + 664        "NUMERIC": TokenType.DECIMAL,
    + 665        "FIXED": TokenType.DECIMAL,
    + 666        "REAL": TokenType.FLOAT,
    + 667        "FLOAT": TokenType.FLOAT,
    + 668        "FLOAT4": TokenType.FLOAT,
    + 669        "FLOAT8": TokenType.DOUBLE,
    + 670        "DOUBLE": TokenType.DOUBLE,
    + 671        "DOUBLE PRECISION": TokenType.DOUBLE,
    + 672        "JSON": TokenType.JSON,
    + 673        "CHAR": TokenType.CHAR,
    + 674        "CHARACTER": TokenType.CHAR,
    + 675        "NCHAR": TokenType.NCHAR,
    + 676        "VARCHAR": TokenType.VARCHAR,
    + 677        "VARCHAR2": TokenType.VARCHAR,
    + 678        "NVARCHAR": TokenType.NVARCHAR,
    + 679        "NVARCHAR2": TokenType.NVARCHAR,
    + 680        "STR": TokenType.TEXT,
    + 681        "STRING": TokenType.TEXT,
    + 682        "TEXT": TokenType.TEXT,
    + 683        "CLOB": TokenType.TEXT,
    + 684        "LONGVARCHAR": TokenType.TEXT,
    + 685        "BINARY": TokenType.BINARY,
    + 686        "BLOB": TokenType.VARBINARY,
    + 687        "BYTEA": TokenType.VARBINARY,
    + 688        "VARBINARY": TokenType.VARBINARY,
    + 689        "TIME": TokenType.TIME,
    + 690        "TIMESTAMP": TokenType.TIMESTAMP,
    + 691        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
    + 692        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
    + 693        "DATE": TokenType.DATE,
    + 694        "DATETIME": TokenType.DATETIME,
    + 695        "INT4RANGE": TokenType.INT4RANGE,
    + 696        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
    + 697        "INT8RANGE": TokenType.INT8RANGE,
    + 698        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
    + 699        "NUMRANGE": TokenType.NUMRANGE,
    + 700        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
    + 701        "TSRANGE": TokenType.TSRANGE,
    + 702        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
    + 703        "TSTZRANGE": TokenType.TSTZRANGE,
    + 704        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
    + 705        "DATERANGE": TokenType.DATERANGE,
    + 706        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
    + 707        "UNIQUE": TokenType.UNIQUE,
    + 708        "STRUCT": TokenType.STRUCT,
    + 709        "VARIANT": TokenType.VARIANT,
    + 710        "ALTER": TokenType.ALTER,
    + 711        "ANALYZE": TokenType.COMMAND,
    + 712        "CALL": TokenType.COMMAND,
    + 713        "COMMENT": TokenType.COMMENT,
    + 714        "COPY": TokenType.COMMAND,
    + 715        "EXPLAIN": TokenType.COMMAND,
    + 716        "GRANT": TokenType.COMMAND,
    + 717        "OPTIMIZE": TokenType.COMMAND,
    + 718        "PREPARE": TokenType.COMMAND,
    + 719        "TRUNCATE": TokenType.COMMAND,
    + 720        "VACUUM": TokenType.COMMAND,
      721    }
      722
    - 723    COMMANDS = {
    - 724        TokenType.COMMAND,
    - 725        TokenType.EXECUTE,
    - 726        TokenType.FETCH,
    - 727        TokenType.SHOW,
    - 728    }
    - 729
    - 730    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
    - 731
    - 732    # handle numeric literals like in hive (3L = BIGINT)
    - 733    NUMERIC_LITERALS: t.Dict[str, str] = {}
    - 734    ENCODE: t.Optional[str] = None
    - 735
    - 736    COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")]
    - 737    KEYWORD_TRIE: t.Dict = {}  # autofilled
    - 738
    - 739    __slots__ = (
    - 740        "sql",
    - 741        "size",
    - 742        "tokens",
    - 743        "_start",
    - 744        "_current",
    - 745        "_line",
    - 746        "_col",
    - 747        "_comments",
    - 748        "_char",
    - 749        "_end",
    - 750        "_peek",
    - 751        "_prev_token_line",
    - 752        "identifiers_can_start_with_digit",
    - 753    )
    - 754
    - 755    def __init__(self) -> None:
    - 756        self.reset()
    - 757
    - 758    def reset(self) -> None:
    - 759        self.sql = ""
    - 760        self.size = 0
    - 761        self.tokens: t.List[Token] = []
    - 762        self._start = 0
    - 763        self._current = 0
    - 764        self._line = 1
    - 765        self._col = 0
    - 766        self._comments: t.List[str] = []
    - 767
    - 768        self._char = ""
    - 769        self._end = False
    - 770        self._peek = ""
    - 771        self._prev_token_line = -1
    - 772
    - 773    def tokenize(self, sql: str) -> t.List[Token]:
    - 774        """Returns a list of tokens corresponding to the SQL string `sql`."""
    - 775        self.reset()
    - 776        self.sql = sql
    - 777        self.size = len(sql)
    + 723    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
    + 724        " ": TokenType.SPACE,
    + 725        "\t": TokenType.SPACE,
    + 726        "\n": TokenType.BREAK,
    + 727        "\r": TokenType.BREAK,
    + 728        "\r\n": TokenType.BREAK,
    + 729    }
    + 730
    + 731    COMMANDS = {
    + 732        TokenType.COMMAND,
    + 733        TokenType.EXECUTE,
    + 734        TokenType.FETCH,
    + 735        TokenType.SHOW,
    + 736    }
    + 737
    + 738    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
    + 739
    + 740    # handle numeric literals like in hive (3L = BIGINT)
    + 741    NUMERIC_LITERALS: t.Dict[str, str] = {}
    + 742    ENCODE: t.Optional[str] = None
    + 743
    + 744    COMMENTS = ["--", ("/*", "*/")]
    + 745
    + 746    __slots__ = (
    + 747        "sql",
    + 748        "size",
    + 749        "tokens",
    + 750        "_start",
    + 751        "_current",
    + 752        "_line",
    + 753        "_col",
    + 754        "_comments",
    + 755        "_char",
    + 756        "_end",
    + 757        "_peek",
    + 758        "_prev_token_line",
    + 759    )
    + 760
    + 761    def __init__(self) -> None:
    + 762        self.reset()
    + 763
    + 764    def reset(self) -> None:
    + 765        self.sql = ""
    + 766        self.size = 0
    + 767        self.tokens: t.List[Token] = []
    + 768        self._start = 0
    + 769        self._current = 0
    + 770        self._line = 1
    + 771        self._col = 0
    + 772        self._comments: t.List[str] = []
    + 773
    + 774        self._char = ""
    + 775        self._end = False
    + 776        self._peek = ""
    + 777        self._prev_token_line = -1
      778
    - 779        try:
    - 780            self._scan()
    - 781        except Exception as e:
    - 782            start = max(self._current - 50, 0)
    - 783            end = min(self._current + 50, self.size - 1)
    - 784            context = self.sql[start:end]
    - 785            raise ValueError(f"Error tokenizing '{context}'") from e
    - 786
    - 787        return self.tokens
    - 788
    - 789    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
    - 790        while self.size and not self._end:
    - 791            self._start = self._current
    - 792            self._advance()
    - 793
    - 794            if self._char is None:
    - 795                break
    - 796
    - 797            if self._char not in self.WHITE_SPACE:
    - 798                if self._char.isdigit():
    - 799                    self._scan_number()
    - 800                elif self._char in self._IDENTIFIERS:
    - 801                    self._scan_identifier(self._IDENTIFIERS[self._char])
    - 802                else:
    - 803                    self._scan_keywords()
    - 804
    - 805            if until and until():
    - 806                break
    - 807
    - 808        if self.tokens and self._comments:
    - 809            self.tokens[-1].comments.extend(self._comments)
    + 779    def tokenize(self, sql: str) -> t.List[Token]:
    + 780        """Returns a list of tokens corresponding to the SQL string `sql`."""
    + 781        self.reset()
    + 782        self.sql = sql
    + 783        self.size = len(sql)
    + 784
    + 785        try:
    + 786            self._scan()
    + 787        except Exception as e:
    + 788            start = max(self._current - 50, 0)
    + 789            end = min(self._current + 50, self.size - 1)
    + 790            context = self.sql[start:end]
    + 791            raise ValueError(f"Error tokenizing '{context}'") from e
    + 792
    + 793        return self.tokens
    + 794
    + 795    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
    + 796        while self.size and not self._end:
    + 797            self._start = self._current
    + 798            self._advance()
    + 799
    + 800            if self._char is None:
    + 801                break
    + 802
    + 803            if self._char not in self.WHITE_SPACE:
    + 804                if self._char.isdigit():
    + 805                    self._scan_number()
    + 806                elif self._char in self._IDENTIFIERS:
    + 807                    self._scan_identifier(self._IDENTIFIERS[self._char])
    + 808                else:
    + 809                    self._scan_keywords()
      810
    - 811    def _chars(self, size: int) -> str:
    - 812        if size == 1:
    - 813            return self._char
    - 814
    - 815        start = self._current - 1
    - 816        end = start + size
    - 817
    - 818        return self.sql[start:end] if end <= self.size else ""
    - 819
    - 820    def _advance(self, i: int = 1, alnum: bool = False) -> None:
    - 821        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
    - 822            self._col = 1
    - 823            self._line += 1
    - 824        else:
    - 825            self._col += i
    - 826
    - 827        self._current += i
    - 828        self._end = self._current >= self.size
    - 829        self._char = self.sql[self._current - 1]
    - 830        self._peek = "" if self._end else self.sql[self._current]
    - 831
    - 832        if alnum and self._char.isalnum():
    - 833            # Here we use local variables instead of attributes for better performance
    - 834            _col = self._col
    - 835            _current = self._current
    - 836            _end = self._end
    - 837            _peek = self._peek
    - 838
    - 839            while _peek.isalnum():
    - 840                _col += 1
    - 841                _current += 1
    - 842                _end = _current >= self.size
    - 843                _peek = "" if _end else self.sql[_current]
    + 811            if until and until():
    + 812                break
    + 813
    + 814        if self.tokens and self._comments:
    + 815            self.tokens[-1].comments.extend(self._comments)
    + 816
    + 817    def _chars(self, size: int) -> str:
    + 818        if size == 1:
    + 819            return self._char
    + 820
    + 821        start = self._current - 1
    + 822        end = start + size
    + 823
    + 824        return self.sql[start:end] if end <= self.size else ""
    + 825
    + 826    def _advance(self, i: int = 1, alnum: bool = False) -> None:
    + 827        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
    + 828            self._col = 1
    + 829            self._line += 1
    + 830        else:
    + 831            self._col += i
    + 832
    + 833        self._current += i
    + 834        self._end = self._current >= self.size
    + 835        self._char = self.sql[self._current - 1]
    + 836        self._peek = "" if self._end else self.sql[self._current]
    + 837
    + 838        if alnum and self._char.isalnum():
    + 839            # Here we use local variables instead of attributes for better performance
    + 840            _col = self._col
    + 841            _current = self._current
    + 842            _end = self._end
    + 843            _peek = self._peek
      844
    - 845            self._col = _col
    - 846            self._current = _current
    - 847            self._end = _end
    - 848            self._peek = _peek
    - 849            self._char = self.sql[_current - 1]
    + 845            while _peek.isalnum():
    + 846                _col += 1
    + 847                _current += 1
    + 848                _end = _current >= self.size
    + 849                _peek = "" if _end else self.sql[_current]
      850
    - 851    @property
    - 852    def _text(self) -> str:
    - 853        return self.sql[self._start : self._current]
    - 854
    - 855    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
    - 856        self._prev_token_line = self._line
    - 857        self.tokens.append(
    - 858            Token(
    - 859                token_type,
    - 860                text=self._text if text is None else text,
    - 861                line=self._line,
    - 862                col=self._col,
    - 863                start=self._start,
    - 864                end=self._current - 1,
    - 865                comments=self._comments,
    - 866            )
    - 867        )
    - 868        self._comments = []
    - 869
    - 870        # If we have either a semicolon or a begin token before the command's token, we'll parse
    - 871        # whatever follows the command's token as a string
    - 872        if (
    - 873            token_type in self.COMMANDS
    - 874            and self._peek != ";"
    - 875            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
    - 876        ):
    - 877            start = self._current
    - 878            tokens = len(self.tokens)
    - 879            self._scan(lambda: self._peek == ";")
    - 880            self.tokens = self.tokens[:tokens]
    - 881            text = self.sql[start : self._current].strip()
    - 882            if text:
    - 883                self._add(TokenType.STRING, text)
    - 884
    - 885    def _scan_keywords(self) -> None:
    - 886        size = 0
    - 887        word = None
    - 888        chars = self._text
    - 889        char = chars
    - 890        prev_space = False
    - 891        skip = False
    - 892        trie = self.KEYWORD_TRIE
    - 893        single_token = char in self.SINGLE_TOKENS
    - 894
    - 895        while chars:
    - 896            if skip:
    - 897                result = 1
    - 898            else:
    - 899                result, trie = in_trie(trie, char.upper())
    - 900
    - 901            if result == 0:
    - 902                break
    - 903            if result == 2:
    - 904                word = chars
    - 905
    - 906            size += 1
    - 907            end = self._current - 1 + size
    - 908
    - 909            if end < self.size:
    - 910                char = self.sql[end]
    - 911                single_token = single_token or char in self.SINGLE_TOKENS
    - 912                is_space = char in self.WHITE_SPACE
    - 913
    - 914                if not is_space or not prev_space:
    - 915                    if is_space:
    - 916                        char = " "
    - 917                    chars += char
    - 918                    prev_space = is_space
    - 919                    skip = False
    - 920                else:
    - 921                    skip = True
    - 922            else:
    - 923                char = ""
    - 924                chars = " "
    + 851            self._col = _col
    + 852            self._current = _current
    + 853            self._end = _end
    + 854            self._peek = _peek
    + 855            self._char = self.sql[_current - 1]
    + 856
    + 857    @property
    + 858    def _text(self) -> str:
    + 859        return self.sql[self._start : self._current]
    + 860
    + 861    def peek(self, i: int = 0) -> str:
    + 862        i = self._current + i
    + 863        if i < self.size:
    + 864            return self.sql[i]
    + 865        return ""
    + 866
    + 867    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
    + 868        self._prev_token_line = self._line
    + 869        self.tokens.append(
    + 870            Token(
    + 871                token_type,
    + 872                text=self._text if text is None else text,
    + 873                line=self._line,
    + 874                col=self._col,
    + 875                start=self._start,
    + 876                end=self._current - 1,
    + 877                comments=self._comments,
    + 878            )
    + 879        )
    + 880        self._comments = []
    + 881
    + 882        # If we have either a semicolon or a begin token before the command's token, we'll parse
    + 883        # whatever follows the command's token as a string
    + 884        if (
    + 885            token_type in self.COMMANDS
    + 886            and self._peek != ";"
    + 887            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
    + 888        ):
    + 889            start = self._current
    + 890            tokens = len(self.tokens)
    + 891            self._scan(lambda: self._peek == ";")
    + 892            self.tokens = self.tokens[:tokens]
    + 893            text = self.sql[start : self._current].strip()
    + 894            if text:
    + 895                self._add(TokenType.STRING, text)
    + 896
    + 897    def _scan_keywords(self) -> None:
    + 898        size = 0
    + 899        word = None
    + 900        chars = self._text
    + 901        char = chars
    + 902        prev_space = False
    + 903        skip = False
    + 904        trie = self._KEYWORD_TRIE
    + 905        single_token = char in self.SINGLE_TOKENS
    + 906
    + 907        while chars:
    + 908            if skip:
    + 909                result = 1
    + 910            else:
    + 911                result, trie = in_trie(trie, char.upper())
    + 912
    + 913            if result == 0:
    + 914                break
    + 915            if result == 2:
    + 916                word = chars
    + 917
    + 918            size += 1
    + 919            end = self._current - 1 + size
    + 920
    + 921            if end < self.size:
    + 922                char = self.sql[end]
    + 923                single_token = single_token or char in self.SINGLE_TOKENS
    + 924                is_space = char in self.WHITE_SPACE
      925
    - 926        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word
    - 927
    - 928        if not word:
    - 929            if self._char in self.SINGLE_TOKENS:
    - 930                self._add(self.SINGLE_TOKENS[self._char], text=self._char)
    - 931                return
    - 932            self._scan_var()
    - 933            return
    - 934
    - 935        if self._scan_string(word):
    - 936            return
    - 937        if self._scan_comment(word):
    - 938            return
    + 926                if not is_space or not prev_space:
    + 927                    if is_space:
    + 928                        char = " "
    + 929                    chars += char
    + 930                    prev_space = is_space
    + 931                    skip = False
    + 932                else:
    + 933                    skip = True
    + 934            else:
    + 935                char = ""
    + 936                chars = " "
    + 937
    + 938        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word
      939
    - 940        self._advance(size - 1)
    - 941        word = word.upper()
    - 942        self._add(self.KEYWORDS[word], text=word)
    - 943
    - 944    def _scan_comment(self, comment_start: str) -> bool:
    - 945        if comment_start not in self._COMMENTS:
    - 946            return False
    - 947
    - 948        comment_start_line = self._line
    - 949        comment_start_size = len(comment_start)
    - 950        comment_end = self._COMMENTS[comment_start]
    + 940        if not word:
    + 941            if self._char in self.SINGLE_TOKENS:
    + 942                self._add(self.SINGLE_TOKENS[self._char], text=self._char)
    + 943                return
    + 944            self._scan_var()
    + 945            return
    + 946
    + 947        if self._scan_string(word):
    + 948            return
    + 949        if self._scan_comment(word):
    + 950            return
      951
    - 952        if comment_end:
    - 953            # Skip the comment's start delimiter
    - 954            self._advance(comment_start_size)
    + 952        self._advance(size - 1)
    + 953        word = word.upper()
    + 954        self._add(self.KEYWORDS[word], text=word)
      955
    - 956            comment_end_size = len(comment_end)
    - 957            while not self._end and self._chars(comment_end_size) != comment_end:
    - 958                self._advance(alnum=True)
    + 956    def _scan_comment(self, comment_start: str) -> bool:
    + 957        if comment_start not in self._COMMENTS:
    + 958            return False
      959
    - 960            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
    - 961            self._advance(comment_end_size - 1)
    - 962        else:
    - 963            while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK:
    - 964                self._advance(alnum=True)
    - 965            self._comments.append(self._text[comment_start_size:])
    - 966
    - 967        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
    - 968        # Multiple consecutive comments are preserved by appending them to the current comments list.
    - 969        if comment_start_line == self._prev_token_line:
    - 970            self.tokens[-1].comments.extend(self._comments)
    - 971            self._comments = []
    - 972            self._prev_token_line = self._line
    - 973
    - 974        return True
    - 975
    - 976    def _scan_number(self) -> None:
    - 977        if self._char == "0":
    - 978            peek = self._peek.upper()
    - 979            if peek == "B":
    - 980                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
    - 981            elif peek == "X":
    - 982                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)
    - 983
    - 984        decimal = False
    - 985        scientific = 0
    - 986
    - 987        while True:
    - 988            if self._peek.isdigit():
    - 989                self._advance()
    - 990            elif self._peek == "." and not decimal:
    - 991                decimal = True
    - 992                self._advance()
    - 993            elif self._peek in ("-", "+") and scientific == 1:
    - 994                scientific += 1
    - 995                self._advance()
    - 996            elif self._peek.upper() == "E" and not scientific:
    - 997                scientific += 1
    - 998                self._advance()
    - 999            elif self._peek.isidentifier():
    -1000                number_text = self._text
    -1001                literal = ""
    -1002
    -1003                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
    -1004                    literal += self._peek.upper()
    -1005                    self._advance()
    -1006
    -1007                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))
    -1008
    -1009                if token_type:
    -1010                    self._add(TokenType.NUMBER, number_text)
    -1011                    self._add(TokenType.DCOLON, "::")
    -1012                    return self._add(token_type, literal)
    -1013                elif self.identifiers_can_start_with_digit:  # type: ignore
    -1014                    return self._add(TokenType.VAR)
    -1015
    -1016                self._add(TokenType.NUMBER, number_text)
    -1017                return self._advance(-len(literal))
    -1018            else:
    -1019                return self._add(TokenType.NUMBER)
    -1020
    -1021    def _scan_bits(self) -> None:
    -1022        self._advance()
    -1023        value = self._extract_value()
    -1024        try:
    -1025            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
    -1026            int(value, 2)
    -1027            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
    -1028        except ValueError:
    -1029            self._add(TokenType.IDENTIFIER)
    -1030
    -1031    def _scan_hex(self) -> None:
    -1032        self._advance()
    -1033        value = self._extract_value()
    -1034        try:
    -1035            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
    -1036            int(value, 16)
    -1037            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
    -1038        except ValueError:
    -1039            self._add(TokenType.IDENTIFIER)
    -1040
    -1041    def _extract_value(self) -> str:
    -1042        while True:
    -1043            char = self._peek.strip()
    -1044            if char and char not in self.SINGLE_TOKENS:
    -1045                self._advance(alnum=True)
    -1046            else:
    -1047                break
    -1048
    -1049        return self._text
    -1050
    -1051    def _scan_string(self, start: str) -> bool:
    -1052        base = None
    -1053        token_type = TokenType.STRING
    -1054
    -1055        if start in self._QUOTES:
    -1056            end = self._QUOTES[start]
    -1057        elif start in self._FORMAT_STRINGS:
    -1058            end, token_type = self._FORMAT_STRINGS[start]
    -1059
    -1060            if token_type == TokenType.HEX_STRING:
    -1061                base = 16
    -1062            elif token_type == TokenType.BIT_STRING:
    -1063                base = 2
    -1064        else:
    -1065            return False
    + 960        comment_start_line = self._line
    + 961        comment_start_size = len(comment_start)
    + 962        comment_end = self._COMMENTS[comment_start]
    + 963
    + 964        if comment_end:
    + 965            # Skip the comment's start delimiter
    + 966            self._advance(comment_start_size)
    + 967
    + 968            comment_end_size = len(comment_end)
    + 969            while not self._end and self._chars(comment_end_size) != comment_end:
    + 970                self._advance(alnum=True)
    + 971
    + 972            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
    + 973            self._advance(comment_end_size - 1)
    + 974        else:
    + 975            while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK:
    + 976                self._advance(alnum=True)
    + 977            self._comments.append(self._text[comment_start_size:])
    + 978
    + 979        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
    + 980        # Multiple consecutive comments are preserved by appending them to the current comments list.
    + 981        if comment_start_line == self._prev_token_line:
    + 982            self.tokens[-1].comments.extend(self._comments)
    + 983            self._comments = []
    + 984            self._prev_token_line = self._line
    + 985
    + 986        return True
    + 987
    + 988    def _scan_number(self) -> None:
    + 989        if self._char == "0":
    + 990            peek = self._peek.upper()
    + 991            if peek == "B":
    + 992                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
    + 993            elif peek == "X":
    + 994                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)
    + 995
    + 996        decimal = False
    + 997        scientific = 0
    + 998
    + 999        while True:
    +1000            if self._peek.isdigit():
    +1001                self._advance()
    +1002            elif self._peek == "." and not decimal:
    +1003                after = self.peek(1)
    +1004                if after.isdigit() or not after.isalpha():
    +1005                    decimal = True
    +1006                    self._advance()
    +1007                else:
    +1008                    return self._add(TokenType.VAR)
    +1009            elif self._peek in ("-", "+") and scientific == 1:
    +1010                scientific += 1
    +1011                self._advance()
    +1012            elif self._peek.upper() == "E" and not scientific:
    +1013                scientific += 1
    +1014                self._advance()
    +1015            elif self._peek.isidentifier():
    +1016                number_text = self._text
    +1017                literal = ""
    +1018
    +1019                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
    +1020                    literal += self._peek.upper()
    +1021                    self._advance()
    +1022
    +1023                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal, ""))
    +1024
    +1025                if token_type:
    +1026                    self._add(TokenType.NUMBER, number_text)
    +1027                    self._add(TokenType.DCOLON, "::")
    +1028                    return self._add(token_type, literal)
    +1029                elif self.IDENTIFIERS_CAN_START_WITH_DIGIT:
    +1030                    return self._add(TokenType.VAR)
    +1031
    +1032                self._add(TokenType.NUMBER, number_text)
    +1033                return self._advance(-len(literal))
    +1034            else:
    +1035                return self._add(TokenType.NUMBER)
    +1036
    +1037    def _scan_bits(self) -> None:
    +1038        self._advance()
    +1039        value = self._extract_value()
    +1040        try:
    +1041            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
    +1042            int(value, 2)
    +1043            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
    +1044        except ValueError:
    +1045            self._add(TokenType.IDENTIFIER)
    +1046
    +1047    def _scan_hex(self) -> None:
    +1048        self._advance()
    +1049        value = self._extract_value()
    +1050        try:
    +1051            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
    +1052            int(value, 16)
    +1053            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
    +1054        except ValueError:
    +1055            self._add(TokenType.IDENTIFIER)
    +1056
    +1057    def _extract_value(self) -> str:
    +1058        while True:
    +1059            char = self._peek.strip()
    +1060            if char and char not in self.SINGLE_TOKENS:
    +1061                self._advance(alnum=True)
    +1062            else:
    +1063                break
    +1064
    +1065        return self._text
     1066
    -1067        self._advance(len(start))
    -1068        text = self._extract_string(end)
    -1069
    -1070        if base:
    -1071            try:
    -1072                int(text, base)
    -1073            except:
    -1074                raise RuntimeError(
    -1075                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
    -1076                )
    -1077        else:
    -1078            text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text
    -1079
    -1080        self._add(token_type, text)
    -1081        return True
    +1067    def _scan_string(self, start: str) -> bool:
    +1068        base = None
    +1069        token_type = TokenType.STRING
    +1070
    +1071        if start in self._QUOTES:
    +1072            end = self._QUOTES[start]
    +1073        elif start in self._FORMAT_STRINGS:
    +1074            end, token_type = self._FORMAT_STRINGS[start]
    +1075
    +1076            if token_type == TokenType.HEX_STRING:
    +1077                base = 16
    +1078            elif token_type == TokenType.BIT_STRING:
    +1079                base = 2
    +1080        else:
    +1081            return False
     1082
    -1083    def _scan_identifier(self, identifier_end: str) -> None:
    -1084        self._advance()
    -1085        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
    -1086        self._add(TokenType.IDENTIFIER, text)
    -1087
    -1088    def _scan_var(self) -> None:
    -1089        while True:
    -1090            char = self._peek.strip()
    -1091            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
    -1092                self._advance(alnum=True)
    -1093            else:
    -1094                break
    +1083        self._advance(len(start))
    +1084        text = self._extract_string(end)
    +1085
    +1086        if base:
    +1087            try:
    +1088                int(text, base)
    +1089            except:
    +1090                raise RuntimeError(
    +1091                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
    +1092                )
    +1093        else:
    +1094            text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text
     1095
    -1096        self._add(
    -1097            TokenType.VAR
    -1098            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
    -1099            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
    -1100        )
    -1101
    -1102    def _extract_string(self, delimiter: str, escapes=None) -> str:
    -1103        text = ""
    -1104        delim_size = len(delimiter)
    -1105        escapes = self._STRING_ESCAPES if escapes is None else escapes
    -1106
    -1107        while True:
    -1108            if self._char in escapes and (self._peek == delimiter or self._peek in escapes):
    -1109                if self._peek == delimiter:
    -1110                    text += self._peek
    -1111                else:
    -1112                    text += self._char + self._peek
    -1113
    -1114                if self._current + 1 < self.size:
    -1115                    self._advance(2)
    -1116                else:
    -1117                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
    -1118            else:
    -1119                if self._chars(delim_size) == delimiter:
    -1120                    if delim_size > 1:
    -1121                        self._advance(delim_size - 1)
    -1122                    break
    -1123
    -1124                if self._end:
    -1125                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
    -1126
    -1127                current = self._current - 1
    -1128                self._advance(alnum=True)
    -1129                text += self.sql[current : self._current - 1]
    -1130
    -1131        return text
    +1096        self._add(token_type, text)
    +1097        return True
    +1098
    +1099    def _scan_identifier(self, identifier_end: str) -> None:
    +1100        self._advance()
    +1101        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
    +1102        self._add(TokenType.IDENTIFIER, text)
    +1103
    +1104    def _scan_var(self) -> None:
    +1105        while True:
    +1106            char = self._peek.strip()
    +1107            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
    +1108                self._advance(alnum=True)
    +1109            else:
    +1110                break
    +1111
    +1112        self._add(
    +1113            TokenType.VAR
    +1114            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
    +1115            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
    +1116        )
    +1117
    +1118    def _extract_string(self, delimiter: str, escapes=None) -> str:
    +1119        text = ""
    +1120        delim_size = len(delimiter)
    +1121        escapes = self._STRING_ESCAPES if escapes is None else escapes
    +1122
    +1123        while True:
    +1124            if self._char in escapes and (self._peek == delimiter or self._peek in escapes):
    +1125                if self._peek == delimiter:
    +1126                    text += self._peek
    +1127                else:
    +1128                    text += self._char + self._peek
    +1129
    +1130                if self._current + 1 < self.size:
    +1131                    self._advance(2)
    +1132                else:
    +1133                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
    +1134            else:
    +1135                if self._chars(delim_size) == delimiter:
    +1136                    if delim_size > 1:
    +1137                        self._advance(delim_size - 1)
    +1138                    break
    +1139
    +1140                if self._end:
    +1141                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
    +1142
    +1143                current = self._current - 1
    +1144                self._advance(alnum=True)
    +1145                text += self.sql[current : self._current - 1]
    +1146
    +1147        return text
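
The NUMERIC_LITERALS hook referenced in the listing above ("3L = BIGINT" in Hive) is resolved inside _scan_number: the suffix is looked up in KEYWORDS and emitted as a NUMBER, a DCOLON and the mapped type token. A minimal sketch of how that surfaces through the public tokenizer, assuming a sqlglot install matching this version (the query string is illustrative):

    from sqlglot.dialects.hive import Hive

    # Hive maps the "L" suffix to BIGINT via NUMERIC_LITERALS, so "3L" is emitted
    # as NUMBER("3"), DCOLON("::") and BIGINT("L") by Tokenizer._scan_number.
    tokens = Hive.Tokenizer().tokenize("SELECT 3L")
    print([(token.token_type.name, token.text) for token in tokens])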
     
    @@ -6823,20 +6872,20 @@
    -
    758    def reset(self) -> None:
    -759        self.sql = ""
    -760        self.size = 0
    -761        self.tokens: t.List[Token] = []
    -762        self._start = 0
    -763        self._current = 0
    -764        self._line = 1
    -765        self._col = 0
    -766        self._comments: t.List[str] = []
    -767
    -768        self._char = ""
    -769        self._end = False
    -770        self._peek = ""
    -771        self._prev_token_line = -1
    +            
    764    def reset(self) -> None:
    +765        self.sql = ""
    +766        self.size = 0
    +767        self.tokens: t.List[Token] = []
    +768        self._start = 0
    +769        self._current = 0
    +770        self._line = 1
    +771        self._col = 0
    +772        self._comments: t.List[str] = []
    +773
    +774        self._char = ""
    +775        self._end = False
    +776        self._peek = ""
    +777        self._prev_token_line = -1
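
reset() above is what makes a single Tokenizer instance reusable: tokenize() invokes it before every scan, clearing sql, tokens and the cursor/line/column state. A short illustrative sketch, assuming this version of sqlglot:

    from sqlglot.tokens import Tokenizer

    tokenizer = Tokenizer()

    # tokenize() calls reset() first, so the same instance can be reused safely.
    first = tokenizer.tokenize("SELECT 1")
    second = tokenizer.tokenize("SELECT 2")
    assert len(first) == 2 and second[-1].text == "2"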
     
    @@ -6854,21 +6903,21 @@
    -
    773    def tokenize(self, sql: str) -> t.List[Token]:
    -774        """Returns a list of tokens corresponding to the SQL string `sql`."""
    -775        self.reset()
    -776        self.sql = sql
    -777        self.size = len(sql)
    -778
    -779        try:
    -780            self._scan()
    -781        except Exception as e:
    -782            start = max(self._current - 50, 0)
    -783            end = min(self._current + 50, self.size - 1)
    -784            context = self.sql[start:end]
    -785            raise ValueError(f"Error tokenizing '{context}'") from e
    -786
    -787        return self.tokens
    +            
    779    def tokenize(self, sql: str) -> t.List[Token]:
    +780        """Returns a list of tokens corresponding to the SQL string `sql`."""
    +781        self.reset()
    +782        self.sql = sql
    +783        self.size = len(sql)
    +784
    +785        try:
    +786            self._scan()
    +787        except Exception as e:
    +788            start = max(self._current - 50, 0)
    +789            end = min(self._current + 50, self.size - 1)
    +790            context = self.sql[start:end]
    +791            raise ValueError(f"Error tokenizing '{context}'") from e
    +792
    +793        return self.tokens
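
When _scan() fails, tokenize() above wraps the underlying exception in a ValueError that carries up to 50 characters of SQL on either side of the failure point. A hedged sketch of that behaviour (the unterminated string is an illustrative input):

    from sqlglot.tokens import Tokenizer

    try:
        # The unterminated string makes _extract_string raise; tokenize()
        # re-raises it as ValueError with the surrounding SQL as context.
        Tokenizer().tokenize("SELECT 'oops")
    except ValueError as error:
        print(error)  # Error tokenizing '...'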
     
    @@ -6876,6 +6925,28 @@
+ def peek(self, i: int = 0) -> str:
    861    def peek(self, i: int = 0) -> str:
    +862        i = self._current + i
    +863        if i < self.size:
    +864            return self.sql[i]
    +865        return ""
    +
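
The new peek() helper gives the scanner a one-character lookahead without advancing, and the reworked _scan_number uses peek(1) to decide whether a dot after a digit run starts a decimal or terminates the number. A minimal sketch of the difference, assuming this version's base Tokenizer (inputs are illustrative):

    from sqlglot.tokens import Tokenizer

    tokenizer = Tokenizer()

    # "1.5" scans as a single NUMBER, while in "1.x" peek(1) sees a letter after
    # the dot, so the scanner emits VAR("1"), DOT(".") and VAR("x") instead.
    print([(t.token_type.name, t.text) for t in tokenizer.tokenize("SELECT 1.5")])
    print([(t.token_type.name, t.text) for t in tokenizer.tokenize("SELECT 1.x")])
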
    diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py index 5b10852..2166e65 100644 --- a/sqlglot/dialects/bigquery.py +++ b/sqlglot/dialects/bigquery.py @@ -7,6 +7,7 @@ from sqlglot import exp, generator, parser, tokens, transforms from sqlglot.dialects.dialect import ( Dialect, datestrtodate_sql, + format_time_lambda, inline_array_sql, max_or_greatest, min_or_least, @@ -103,16 +104,26 @@ def _unqualify_unnest(expression: exp.Expression) -> exp.Expression: class BigQuery(Dialect): - unnest_column_only = True - time_mapping = { - "%M": "%-M", - "%d": "%-d", - "%m": "%-m", - "%y": "%-y", - "%H": "%-H", - "%I": "%-I", - "%S": "%-S", - "%j": "%-j", + UNNEST_COLUMN_ONLY = True + + TIME_MAPPING = { + "%D": "%m/%d/%y", + } + + FORMAT_MAPPING = { + "DD": "%d", + "MM": "%m", + "MON": "%b", + "MONTH": "%B", + "YYYY": "%Y", + "YY": "%y", + "HH": "%I", + "HH12": "%I", + "HH24": "%H", + "MI": "%M", + "SS": "%S", + "SSSSS": "%f", + "TZH": "%z", } class Tokenizer(tokens.Tokenizer): @@ -142,6 +153,7 @@ class BigQuery(Dialect): "FLOAT64": TokenType.DOUBLE, "INT64": TokenType.BIGINT, "RECORD": TokenType.STRUCT, + "TIMESTAMP": TokenType.TIMESTAMPTZ, "NOT DETERMINISTIC": TokenType.VOLATILE, "UNKNOWN": TokenType.NULL, } @@ -155,13 +167,21 @@ class BigQuery(Dialect): FUNCTIONS = { **parser.Parser.FUNCTIONS, + "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), + "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), "DATE_TRUNC": lambda args: exp.DateTrunc( unit=exp.Literal.string(str(seq_get(args, 1))), this=seq_get(args, 0), ), - "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), + "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)), + "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( + [seq_get(args, 1), seq_get(args, 0)] + ), + "PARSE_TIMESTAMP": lambda args: format_time_lambda(exp.StrToTime, "bigquery")( + [seq_get(args, 1), seq_get(args, 0)] + ), "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( this=seq_get(args, 0), @@ -172,15 +192,15 @@ class BigQuery(Dialect): if re.compile(str(seq_get(args, 1))).groups == 1 else None, ), + "SPLIT": lambda args: exp.Split( + # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split + this=seq_get(args, 0), + expression=seq_get(args, 1) or exp.Literal.string(","), + ), "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), - "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), - "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), - "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), + "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), - "PARSE_TIMESTAMP": lambda args: exp.StrToTime( - this=seq_get(args, 1), format=seq_get(args, 0) - ), } FUNCTION_PARSERS = { @@ -274,9 +294,18 @@ class BigQuery(Dialect): exp.IntDiv: rename_func("DIV"), exp.Max: max_or_greatest, exp.Min: min_or_least, + exp.RegexpExtract: lambda self, e: self.func( + "REGEXP_EXTRACT", + e.this, + e.expression, + e.args.get("position"), + e.args.get("occurrence"), + ), + exp.RegexpLike: rename_func("REGEXP_CONTAINS"), exp.Select: transforms.preprocess( [_unqualify_unnest, transforms.eliminate_distinct_on] ), + exp.StrToDate: lambda self, e: 
f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})", exp.TimeAdd: _date_add_sql("TIME", "ADD"), exp.TimeSub: _date_add_sql("TIME", "SUB"), @@ -295,7 +324,6 @@ class BigQuery(Dialect): exp.StabilityProperty: lambda self, e: f"DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC", - exp.RegexpLike: rename_func("REGEXP_CONTAINS"), } TYPE_MAPPING = { @@ -315,6 +343,7 @@ class BigQuery(Dialect): exp.DataType.Type.TEXT: "STRING", exp.DataType.Type.TIMESTAMP: "DATETIME", exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", + exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", exp.DataType.Type.TINYINT: "INT64", exp.DataType.Type.VARBINARY: "BYTES", exp.DataType.Type.VARCHAR: "STRING", diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py index fc48379..cfa9a7e 100644 --- a/sqlglot/dialects/clickhouse.py +++ b/sqlglot/dialects/clickhouse.py @@ -21,8 +21,9 @@ def _lower_func(sql: str) -> str: class ClickHouse(Dialect): - normalize_functions = None - null_ordering = "nulls_are_last" + NORMALIZE_FUNCTIONS: bool | str = False + NULL_ORDERING = "nulls_are_last" + STRICT_STRING_CONCAT = True class Tokenizer(tokens.Tokenizer): COMMENTS = ["--", "#", "#!", ("/*", "*/")] @@ -163,11 +164,11 @@ class ClickHouse(Dialect): return this - def _parse_position(self, haystack_first: bool = False) -> exp.Expression: + def _parse_position(self, haystack_first: bool = False) -> exp.StrPosition: return super()._parse_position(haystack_first=True) # https://clickhouse.com/docs/en/sql-reference/statements/select/with/ - def _parse_cte(self) -> exp.Expression: + def _parse_cte(self) -> exp.CTE: index = self._index try: # WITH AS @@ -187,17 +188,19 @@ class ClickHouse(Dialect): ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]: is_global = self._match(TokenType.GLOBAL) and self._prev kind_pre = self._match_set(self.JOIN_KINDS, advance=False) and self._prev + if kind_pre: kind = self._match_set(self.JOIN_KINDS) and self._prev side = self._match_set(self.JOIN_SIDES) and self._prev return is_global, side, kind + return ( is_global, self._match_set(self.JOIN_SIDES) and self._prev, self._match_set(self.JOIN_KINDS) and self._prev, ) - def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Expression]: + def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Join]: join = super()._parse_join(skip_join_token) if join: @@ -205,9 +208,14 @@ class ClickHouse(Dialect): return join def _parse_function( - self, functions: t.Optional[t.Dict[str, t.Callable]] = None, anonymous: bool = False + self, + functions: t.Optional[t.Dict[str, t.Callable]] = None, + anonymous: bool = False, + optional_parens: bool = True, ) -> t.Optional[exp.Expression]: - func = super()._parse_function(functions, anonymous) + func = super()._parse_function( + functions=functions, anonymous=anonymous, optional_parens=optional_parens + ) if isinstance(func, exp.Anonymous): params = self._parse_func_params(func) @@ -227,10 +235,12 @@ class ClickHouse(Dialect): ) -> t.Optional[t.List[t.Optional[exp.Expression]]]: if self._match_pair(TokenType.R_PAREN, TokenType.L_PAREN): return self._parse_csv(self._parse_lambda) + if self._match(TokenType.L_PAREN): params = self._parse_csv(self._parse_lambda) self._match_r_paren(this) return params + return None def _parse_quantile(self) -> exp.Quantile: @@ -247,12 +257,12 @@ class ClickHouse(Dialect): def _parse_primary_key( self, wrapped_optional: bool = 
False, in_props: bool = False - ) -> exp.Expression: + ) -> exp.PrimaryKeyColumnConstraint | exp.PrimaryKey: return super()._parse_primary_key( wrapped_optional=wrapped_optional or in_props, in_props=in_props ) - def _parse_on_property(self) -> t.Optional[exp.Property]: + def _parse_on_property(self) -> t.Optional[exp.Expression]: index = self._index if self._match_text_seq("CLUSTER"): this = self._parse_id_var() @@ -329,6 +339,16 @@ class ClickHouse(Dialect): "NAMED COLLECTION", } + def safeconcat_sql(self, expression: exp.SafeConcat) -> str: + # Clickhouse errors out if we try to cast a NULL value to TEXT + return self.func( + "CONCAT", + *[ + exp.func("if", e.is_(exp.null()), e, exp.cast(e, "text")) + for e in expression.expressions + ], + ) + def cte_sql(self, expression: exp.CTE) -> str: if isinstance(expression.this, exp.Alias): return self.sql(expression, "this") diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py index 4958bc6..f5d523b 100644 --- a/sqlglot/dialects/dialect.py +++ b/sqlglot/dialects/dialect.py @@ -25,6 +25,8 @@ class Dialects(str, Enum): BIGQUERY = "bigquery" CLICKHOUSE = "clickhouse" + DATABRICKS = "databricks" + DRILL = "drill" DUCKDB = "duckdb" HIVE = "hive" MYSQL = "mysql" @@ -38,11 +40,9 @@ class Dialects(str, Enum): SQLITE = "sqlite" STARROCKS = "starrocks" TABLEAU = "tableau" + TERADATA = "teradata" TRINO = "trino" TSQL = "tsql" - DATABRICKS = "databricks" - DRILL = "drill" - TERADATA = "teradata" class _Dialect(type): @@ -76,16 +76,19 @@ class _Dialect(type): enum = Dialects.__members__.get(clsname.upper()) cls.classes[enum.value if enum is not None else clsname.lower()] = klass - klass.time_trie = new_trie(klass.time_mapping) - klass.inverse_time_mapping = {v: k for k, v in klass.time_mapping.items()} - klass.inverse_time_trie = new_trie(klass.inverse_time_mapping) + klass.TIME_TRIE = new_trie(klass.TIME_MAPPING) + klass.FORMAT_TRIE = ( + new_trie(klass.FORMAT_MAPPING) if klass.FORMAT_MAPPING else klass.TIME_TRIE + ) + klass.INVERSE_TIME_MAPPING = {v: k for k, v in klass.TIME_MAPPING.items()} + klass.INVERSE_TIME_TRIE = new_trie(klass.INVERSE_TIME_MAPPING) klass.tokenizer_class = getattr(klass, "Tokenizer", Tokenizer) klass.parser_class = getattr(klass, "Parser", Parser) klass.generator_class = getattr(klass, "Generator", Generator) - klass.quote_start, klass.quote_end = list(klass.tokenizer_class._QUOTES.items())[0] - klass.identifier_start, klass.identifier_end = list( + klass.QUOTE_START, klass.QUOTE_END = list(klass.tokenizer_class._QUOTES.items())[0] + klass.IDENTIFIER_START, klass.IDENTIFIER_END = list( klass.tokenizer_class._IDENTIFIERS.items() )[0] @@ -99,43 +102,80 @@ class _Dialect(type): (None, None), ) - klass.bit_start, klass.bit_end = get_start_end(TokenType.BIT_STRING) - klass.hex_start, klass.hex_end = get_start_end(TokenType.HEX_STRING) - klass.byte_start, klass.byte_end = get_start_end(TokenType.BYTE_STRING) - klass.raw_start, klass.raw_end = get_start_end(TokenType.RAW_STRING) + klass.BIT_START, klass.BIT_END = get_start_end(TokenType.BIT_STRING) + klass.HEX_START, klass.HEX_END = get_start_end(TokenType.HEX_STRING) + klass.BYTE_START, klass.BYTE_END = get_start_end(TokenType.BYTE_STRING) + klass.RAW_START, klass.RAW_END = get_start_end(TokenType.RAW_STRING) - klass.tokenizer_class.identifiers_can_start_with_digit = ( - klass.identifiers_can_start_with_digit - ) + dialect_properties = { + **{ + k: v + for k, v in vars(klass).items() + if not callable(v) and not isinstance(v, classmethod) and not k.startswith("__") + }, 
+ "STRING_ESCAPE": klass.tokenizer_class.STRING_ESCAPES[0], + "IDENTIFIER_ESCAPE": klass.tokenizer_class.IDENTIFIER_ESCAPES[0], + } + + # Pass required dialect properties to the tokenizer, parser and generator classes + for subclass in (klass.tokenizer_class, klass.parser_class, klass.generator_class): + for name, value in dialect_properties.items(): + if hasattr(subclass, name): + setattr(subclass, name, value) + + if not klass.STRICT_STRING_CONCAT: + klass.parser_class.BITWISE[TokenType.DPIPE] = exp.SafeDPipe return klass class Dialect(metaclass=_Dialect): - index_offset = 0 - unnest_column_only = False - alias_post_tablesample = False - identifiers_can_start_with_digit = False - normalize_functions: t.Optional[str] = "upper" - null_ordering = "nulls_are_small" - - date_format = "'%Y-%m-%d'" - dateint_format = "'%Y%m%d'" - time_format = "'%Y-%m-%d %H:%M:%S'" - time_mapping: t.Dict[str, str] = {} - - # autofilled - quote_start = None - quote_end = None - identifier_start = None - identifier_end = None - - time_trie = None - inverse_time_mapping = None - inverse_time_trie = None - tokenizer_class = None - parser_class = None - generator_class = None + # Determines the base index offset for arrays + INDEX_OFFSET = 0 + + # If true unnest table aliases are considered only as column aliases + UNNEST_COLUMN_ONLY = False + + # Determines whether or not the table alias comes after tablesample + ALIAS_POST_TABLESAMPLE = False + + # Determines whether or not an unquoted identifier can start with a digit + IDENTIFIERS_CAN_START_WITH_DIGIT = False + + # Determines whether or not CONCAT's arguments must be strings + STRICT_STRING_CONCAT = False + + # Determines how function names are going to be normalized + NORMALIZE_FUNCTIONS: bool | str = "upper" + + # Indicates the default null ordering method to use if not explicitly set + # Options are: "nulls_are_small", "nulls_are_large", "nulls_are_last" + NULL_ORDERING = "nulls_are_small" + + DATE_FORMAT = "'%Y-%m-%d'" + DATEINT_FORMAT = "'%Y%m%d'" + TIME_FORMAT = "'%Y-%m-%d %H:%M:%S'" + + # Custom time mappings in which the key represents dialect time format + # and the value represents a python time format + TIME_MAPPING: t.Dict[str, str] = {} + + # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_model_rules_date_time + # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Data-Type-Conversions/Character-to-DATE-Conversion/Forcing-a-FORMAT-on-CAST-for-Converting-Character-to-DATE + # special syntax cast(x as date format 'yyyy') defaults to time_mapping + FORMAT_MAPPING: t.Dict[str, str] = {} + + # Autofilled + tokenizer_class = Tokenizer + parser_class = Parser + generator_class = Generator + + # A trie of the time_mapping keys + TIME_TRIE: t.Dict = {} + FORMAT_TRIE: t.Dict = {} + + INVERSE_TIME_MAPPING: t.Dict[str, str] = {} + INVERSE_TIME_TRIE: t.Dict = {} def __eq__(self, other: t.Any) -> bool: return type(self) == other @@ -164,20 +204,13 @@ class Dialect(metaclass=_Dialect): ) -> t.Optional[exp.Expression]: if isinstance(expression, str): return exp.Literal.string( - format_time( - expression[1:-1], # the time formats are quoted - cls.time_mapping, - cls.time_trie, - ) + # the time formats are quoted + format_time(expression[1:-1], cls.TIME_MAPPING, cls.TIME_TRIE) ) + if expression and expression.is_string: - return exp.Literal.string( - format_time( - expression.this, - cls.time_mapping, - cls.time_trie, - ) - ) + return 
exp.Literal.string(format_time(expression.this, cls.TIME_MAPPING, cls.TIME_TRIE)) + return expression def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]: @@ -200,48 +233,14 @@ class Dialect(metaclass=_Dialect): @property def tokenizer(self) -> Tokenizer: if not hasattr(self, "_tokenizer"): - self._tokenizer = self.tokenizer_class() # type: ignore + self._tokenizer = self.tokenizer_class() return self._tokenizer def parser(self, **opts) -> Parser: - return self.parser_class( # type: ignore - **{ - "index_offset": self.index_offset, - "unnest_column_only": self.unnest_column_only, - "alias_post_tablesample": self.alias_post_tablesample, - "null_ordering": self.null_ordering, - **opts, - }, - ) + return self.parser_class(**opts) def generator(self, **opts) -> Generator: - return self.generator_class( # type: ignore - **{ - "quote_start": self.quote_start, - "quote_end": self.quote_end, - "bit_start": self.bit_start, - "bit_end": self.bit_end, - "hex_start": self.hex_start, - "hex_end": self.hex_end, - "byte_start": self.byte_start, - "byte_end": self.byte_end, - "raw_start": self.raw_start, - "raw_end": self.raw_end, - "identifier_start": self.identifier_start, - "identifier_end": self.identifier_end, - "string_escape": self.tokenizer_class.STRING_ESCAPES[0], - "identifier_escape": self.tokenizer_class.IDENTIFIER_ESCAPES[0], - "index_offset": self.index_offset, - "time_mapping": self.inverse_time_mapping, - "time_trie": self.inverse_time_trie, - "unnest_column_only": self.unnest_column_only, - "alias_post_tablesample": self.alias_post_tablesample, - "identifiers_can_start_with_digit": self.identifiers_can_start_with_digit, - "normalize_functions": self.normalize_functions, - "null_ordering": self.null_ordering, - **opts, - } - ) + return self.generator_class(**opts) DialectType = t.Union[str, Dialect, t.Type[Dialect], None] @@ -279,10 +278,7 @@ def inline_array_sql(self: Generator, expression: exp.Array) -> str: def no_ilike_sql(self: Generator, expression: exp.ILike) -> str: return self.like_sql( - exp.Like( - this=exp.Lower(this=expression.this), - expression=expression.args["expression"], - ) + exp.Like(this=exp.Lower(this=expression.this), expression=expression.expression) ) @@ -359,6 +355,7 @@ def var_map_sql( for key, value in zip(keys.expressions, values.expressions): args.append(self.sql(key)) args.append(self.sql(value)) + return self.func(map_func_name, *args) @@ -381,7 +378,7 @@ def format_time_lambda( this=seq_get(args, 0), format=Dialect[dialect].format_time( seq_get(args, 1) - or (Dialect[dialect].time_format if default is True else default or None) + or (Dialect[dialect].TIME_FORMAT if default is True else default or None) ), ) @@ -437,9 +434,7 @@ def parse_date_delta_with_interval( expression = exp.Literal.number(expression.this) return expression_class( - this=args[0], - expression=expression, - unit=exp.Literal.string(interval.text("unit")), + this=args[0], expression=expression, unit=exp.Literal.string(interval.text("unit")) ) return func @@ -462,9 +457,7 @@ def timestamptrunc_sql(self: Generator, expression: exp.TimestampTrunc) -> str: def locate_to_strposition(args: t.List) -> exp.Expression: return exp.StrPosition( - this=seq_get(args, 1), - substr=seq_get(args, 0), - position=seq_get(args, 2), + this=seq_get(args, 1), substr=seq_get(args, 0), position=seq_get(args, 2) ) @@ -546,13 +539,21 @@ def ts_or_ds_to_date_sql(dialect: str) -> t.Callable: def _ts_or_ds_to_date_sql(self: Generator, expression: exp.TsOrDsToDate) -> str: _dialect = 
Dialect.get_or_raise(dialect) time_format = self.format_time(expression) - if time_format and time_format not in (_dialect.time_format, _dialect.date_format): + if time_format and time_format not in (_dialect.TIME_FORMAT, _dialect.DATE_FORMAT): return f"CAST({str_to_time_sql(self, expression)} AS DATE)" return f"CAST({self.sql(expression, 'this')} AS DATE)" return _ts_or_ds_to_date_sql +def concat_to_dpipe_sql(self: Generator, expression: exp.Concat | exp.SafeConcat) -> str: + this, *rest_args = expression.expressions + for arg in rest_args: + this = exp.DPipe(this=this, expression=arg) + + return self.sql(this) + + # Spark, DuckDB use (almost) the same naming scheme for the output columns of the PIVOT operator def pivot_column_names(aggregations: t.List[exp.Expression], dialect: DialectType) -> t.List[str]: names = [] diff --git a/sqlglot/dialects/drill.py b/sqlglot/dialects/drill.py index 924b979..3cca986 100644 --- a/sqlglot/dialects/drill.py +++ b/sqlglot/dialects/drill.py @@ -16,21 +16,10 @@ from sqlglot.dialects.dialect import ( ) -def _str_to_time_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str: - return f"STRPTIME({self.sql(expression, 'this')}, {self.format_time(expression)})" - - -def _ts_or_ds_to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str: - time_format = self.format_time(expression) - if time_format and time_format not in (Drill.time_format, Drill.date_format): - return f"CAST({_str_to_time_sql(self, expression)} AS DATE)" - return f"CAST({self.sql(expression, 'this')} AS DATE)" - - def _date_add_sql(kind: str) -> t.Callable[[generator.Generator, exp.DateAdd | exp.DateSub], str]: def func(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str: this = self.sql(expression, "this") - unit = exp.Var(this=expression.text("unit").upper() or "DAY") + unit = exp.var(expression.text("unit").upper() or "DAY") return ( f"DATE_{kind}({this}, {self.sql(exp.Interval(this=expression.expression, unit=unit))})" ) @@ -41,19 +30,19 @@ def _date_add_sql(kind: str) -> t.Callable[[generator.Generator, exp.DateAdd | e def _str_to_date(self: generator.Generator, expression: exp.StrToDate) -> str: this = self.sql(expression, "this") time_format = self.format_time(expression) - if time_format == Drill.date_format: + if time_format == Drill.DATE_FORMAT: return f"CAST({this} AS DATE)" return f"TO_DATE({this}, {time_format})" class Drill(Dialect): - normalize_functions = None - null_ordering = "nulls_are_last" - date_format = "'yyyy-MM-dd'" - dateint_format = "'yyyyMMdd'" - time_format = "'yyyy-MM-dd HH:mm:ss'" + NORMALIZE_FUNCTIONS: bool | str = False + NULL_ORDERING = "nulls_are_last" + DATE_FORMAT = "'yyyy-MM-dd'" + DATEINT_FORMAT = "'yyyyMMdd'" + TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'" - time_mapping = { + TIME_MAPPING = { "y": "%Y", "Y": "%Y", "YYYY": "%Y", @@ -93,6 +82,7 @@ class Drill(Dialect): class Parser(parser.Parser): STRICT_CAST = False + CONCAT_NULL_OUTPUTS_STRING = True FUNCTIONS = { **parser.Parser.FUNCTIONS, @@ -135,8 +125,8 @@ class Drill(Dialect): exp.DateAdd: _date_add_sql("ADD"), exp.DateStrToDate: datestrtodate_sql, exp.DateSub: _date_add_sql("SUB"), - exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.dateint_format}) AS INT)", - exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS VARCHAR), {Drill.dateint_format})", + exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.DATEINT_FORMAT}) AS INT)", + exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 
'this')} AS VARCHAR), {Drill.DATEINT_FORMAT})", exp.If: lambda self, e: f"`IF`({self.format_args(e.this, e.args.get('true'), e.args.get('false'))})", exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}", exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"), @@ -154,7 +144,7 @@ class Drill(Dialect): exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), exp.ToChar: lambda self, e: self.function_fallback_sql(e), exp.TryCast: no_trycast_sql, - exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.Var(this='DAY')))})", + exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.var('DAY')))})", exp.TsOrDsToDate: ts_or_ds_to_date_sql("drill"), exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)", } diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py index f31da73..f0c1820 100644 --- a/sqlglot/dialects/duckdb.py +++ b/sqlglot/dialects/duckdb.py @@ -56,11 +56,7 @@ def _sort_array_reverse(args: t.List) -> exp.Expression: def _parse_date_diff(args: t.List) -> exp.Expression: - return exp.DateDiff( - this=seq_get(args, 2), - expression=seq_get(args, 1), - unit=seq_get(args, 0), - ) + return exp.DateDiff(this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)) def _struct_sql(self: generator.Generator, expression: exp.Struct) -> str: @@ -90,7 +86,7 @@ def _regexp_extract_sql(self: generator.Generator, expression: exp.RegexpExtract class DuckDB(Dialect): - null_ordering = "nulls_are_last" + NULL_ORDERING = "nulls_are_last" class Tokenizer(tokens.Tokenizer): KEYWORDS = { @@ -118,6 +114,8 @@ class DuckDB(Dialect): } class Parser(parser.Parser): + CONCAT_NULL_OUTPUTS_STRING = True + FUNCTIONS = { **parser.Parser.FUNCTIONS, "ARRAY_LENGTH": exp.ArraySize.from_arg_list, @@ -127,10 +125,7 @@ class DuckDB(Dialect): "DATE_DIFF": _parse_date_diff, "EPOCH": exp.TimeToUnix.from_arg_list, "EPOCH_MS": lambda args: exp.UnixToTime( - this=exp.Div( - this=seq_get(args, 0), - expression=exp.Literal.number(1000), - ) + this=exp.Div(this=seq_get(args, 0), expression=exp.Literal.number(1000)) ), "LIST_REVERSE_SORT": _sort_array_reverse, "LIST_SORT": exp.SortArray.from_arg_list, @@ -191,8 +186,8 @@ class DuckDB(Dialect): "DATE_DIFF", f"'{e.args.get('unit', 'day')}'", e.expression, e.this ), exp.DateStrToDate: datestrtodate_sql, - exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.dateint_format}) AS INT)", - exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.dateint_format}) AS DATE)", + exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.DATEINT_FORMAT}) AS INT)", + exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.DATEINT_FORMAT}) AS DATE)", exp.Explode: rename_func("UNNEST"), exp.IntDiv: lambda self, e: self.binary(e, "//"), exp.JSONExtract: arrow_json_extract_sql, @@ -242,11 +237,27 @@ class DuckDB(Dialect): STAR_MAPPING = {**generator.Generator.STAR_MAPPING, "except": "EXCLUDE"} + UNWRAPPED_INTERVAL_VALUES = (exp.Column, exp.Literal, exp.Paren) + PROPERTIES_LOCATION = { **generator.Generator.PROPERTIES_LOCATION, exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, } + def interval_sql(self, expression: exp.Interval) -> str: + multiplier: t.Optional[int] = None + unit = expression.text("unit").lower() + + if 
unit.startswith("week"): + multiplier = 7 + if unit.startswith("quarter"): + multiplier = 90 + + if multiplier: + return f"({multiplier} * {super().interval_sql(exp.Interval(this=expression.this, unit=exp.var('day')))})" + + return super().interval_sql(expression) + def tablesample_sql( self, expression: exp.TableSample, seed_prefix: str = "SEED", sep: str = " AS " ) -> str: diff --git a/sqlglot/dialects/hive.py b/sqlglot/dialects/hive.py index 650a1e1..8847119 100644 --- a/sqlglot/dialects/hive.py +++ b/sqlglot/dialects/hive.py @@ -80,12 +80,12 @@ def _date_diff_sql(self: generator.Generator, expression: exp.DateDiff) -> str: _, multiplier = DATE_DELTA_INTERVAL.get(unit, ("", 1)) multiplier_sql = f" / {multiplier}" if multiplier > 1 else "" diff_sql = f"{sql_func}({self.format_args(expression.this, expression.expression)})" + return f"{diff_sql}{multiplier_sql}" def _json_format_sql(self: generator.Generator, expression: exp.JSONFormat) -> str: this = expression.this - if not this.type: from sqlglot.optimizer.annotate_types import annotate_types @@ -113,7 +113,7 @@ def _str_to_unix_sql(self: generator.Generator, expression: exp.StrToUnix) -> st def _str_to_date_sql(self: generator.Generator, expression: exp.StrToDate) -> str: this = self.sql(expression, "this") time_format = self.format_time(expression) - if time_format not in (Hive.time_format, Hive.date_format): + if time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT): this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))" return f"CAST({this} AS DATE)" @@ -121,7 +121,7 @@ def _str_to_date_sql(self: generator.Generator, expression: exp.StrToDate) -> st def _str_to_time_sql(self: generator.Generator, expression: exp.StrToTime) -> str: this = self.sql(expression, "this") time_format = self.format_time(expression) - if time_format not in (Hive.time_format, Hive.date_format): + if time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT): this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))" return f"CAST({this} AS TIMESTAMP)" @@ -130,7 +130,7 @@ def _time_format( self: generator.Generator, expression: exp.UnixToStr | exp.StrToUnix ) -> t.Optional[str]: time_format = self.format_time(expression) - if time_format == Hive.time_format: + if time_format == Hive.TIME_FORMAT: return None return time_format @@ -144,16 +144,16 @@ def _time_to_str(self: generator.Generator, expression: exp.TimeToStr) -> str: def _to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str: this = self.sql(expression, "this") time_format = self.format_time(expression) - if time_format and time_format not in (Hive.time_format, Hive.date_format): + if time_format and time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT): return f"TO_DATE({this}, {time_format})" return f"TO_DATE({this})" class Hive(Dialect): - alias_post_tablesample = True - identifiers_can_start_with_digit = True + ALIAS_POST_TABLESAMPLE = True + IDENTIFIERS_CAN_START_WITH_DIGIT = True - time_mapping = { + TIME_MAPPING = { "y": "%Y", "Y": "%Y", "YYYY": "%Y", @@ -184,9 +184,9 @@ class Hive(Dialect): "EEEE": "%A", } - date_format = "'yyyy-MM-dd'" - dateint_format = "'yyyyMMdd'" - time_format = "'yyyy-MM-dd HH:mm:ss'" + DATE_FORMAT = "'yyyy-MM-dd'" + DATEINT_FORMAT = "'yyyyMMdd'" + TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'" class Tokenizer(tokens.Tokenizer): QUOTES = ["'", '"'] @@ -224,9 +224,7 @@ class Hive(Dialect): "BASE64": exp.ToBase64.from_arg_list, "COLLECT_LIST": exp.ArrayAgg.from_arg_list, "DATE_ADD": lambda args: exp.TsOrDsAdd( - this=seq_get(args, 0), - 
expression=seq_get(args, 1), - unit=exp.Literal.string("DAY"), + this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY") ), "DATEDIFF": lambda args: exp.DateDiff( this=exp.TsOrDsToDate(this=seq_get(args, 0)), @@ -234,10 +232,7 @@ class Hive(Dialect): ), "DATE_SUB": lambda args: exp.TsOrDsAdd( this=seq_get(args, 0), - expression=exp.Mul( - this=seq_get(args, 1), - expression=exp.Literal.number(-1), - ), + expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)), unit=exp.Literal.string("DAY"), ), "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( @@ -349,8 +344,8 @@ class Hive(Dialect): exp.DateDiff: _date_diff_sql, exp.DateStrToDate: rename_func("TO_DATE"), exp.DateSub: _add_date_sql, - exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)", - exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})", + exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)", + exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})", exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}", exp.FromBase64: rename_func("UNBASE64"), exp.If: if_sql, @@ -415,10 +410,7 @@ class Hive(Dialect): ) def with_properties(self, properties: exp.Properties) -> str: - return self.properties( - properties, - prefix=self.seg("TBLPROPERTIES"), - ) + return self.properties(properties, prefix=self.seg("TBLPROPERTIES")) def datatype_sql(self, expression: exp.DataType) -> str: if ( diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py index 75023ff..d2462e1 100644 --- a/sqlglot/dialects/mysql.py +++ b/sqlglot/dialects/mysql.py @@ -94,10 +94,10 @@ def _date_add_sql(kind: str) -> t.Callable[[generator.Generator, exp.DateAdd | e class MySQL(Dialect): - time_format = "'%Y-%m-%d %T'" + TIME_FORMAT = "'%Y-%m-%d %T'" # https://prestodb.io/docs/current/functions/datetime.html#mysql-date-functions - time_mapping = { + TIME_MAPPING = { "%M": "%B", "%c": "%-m", "%e": "%-d", @@ -128,6 +128,7 @@ class MySQL(Dialect): "MEDIUMBLOB": TokenType.MEDIUMBLOB, "MEDIUMTEXT": TokenType.MEDIUMTEXT, "SEPARATOR": TokenType.SEPARATOR, + "ENUM": TokenType.ENUM, "START": TokenType.BEGIN, "_ARMSCII8": TokenType.INTRODUCER, "_ASCII": TokenType.INTRODUCER, @@ -279,6 +280,16 @@ class MySQL(Dialect): "SWAPS", } + TYPE_TOKENS = { + *parser.Parser.TYPE_TOKENS, + TokenType.SET, + } + + ENUM_TYPE_TOKENS = { + *parser.Parser.ENUM_TYPE_TOKENS, + TokenType.SET, + } + LOG_DEFAULTS_TO_LN = True def _parse_show_mysql( @@ -372,12 +383,7 @@ class MySQL(Dialect): else: collate = None - return self.expression( - exp.SetItem, - this=charset, - collate=collate, - kind="NAMES", - ) + return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES") class Generator(generator.Generator): LOCKING_READS_SUPPORTED = True @@ -472,9 +478,7 @@ class MySQL(Dialect): def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str: sql = self.sql(expression, arg) - if not sql: - return "" - return f" {prefix} {sql}" + return f" {prefix} {sql}" if sql else "" def _oldstyle_limit_sql(self, expression: exp.Show) -> str: limit = self.sql(expression, "limit") diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py index 7722753..8d35e92 100644 --- a/sqlglot/dialects/oracle.py +++ 
b/sqlglot/dialects/oracle.py @@ -24,21 +24,15 @@ def _parse_xml_table(self: parser.Parser) -> exp.XMLTable: if self._match_text_seq("COLUMNS"): columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True))) - return self.expression( - exp.XMLTable, - this=this, - passing=passing, - columns=columns, - by_ref=by_ref, - ) + return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref) class Oracle(Dialect): - alias_post_tablesample = True + ALIAS_POST_TABLESAMPLE = True # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes - time_mapping = { + TIME_MAPPING = { "AM": "%p", # Meridian indicator with or without periods "A.M.": "%p", # Meridian indicator with or without periods "PM": "%p", # Meridian indicator with or without periods @@ -87,7 +81,7 @@ class Oracle(Dialect): column.set("join_mark", self._match(TokenType.JOIN_MARKER)) return column - def _parse_hint(self) -> t.Optional[exp.Expression]: + def _parse_hint(self) -> t.Optional[exp.Hint]: if self._match(TokenType.HINT): start = self._curr while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH): @@ -129,7 +123,7 @@ class Oracle(Dialect): exp.Group: transforms.preprocess([transforms.unalias_group]), exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */", exp.ILike: no_ilike_sql, - exp.IfNull: rename_func("NVL"), + exp.Coalesce: rename_func("NVL"), exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "), @@ -179,7 +173,6 @@ class Oracle(Dialect): "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, "MINUS": TokenType.EXCEPT, "NVARCHAR2": TokenType.NVARCHAR, - "RETURNING": TokenType.RETURNING, "SAMPLE": TokenType.TABLE_SAMPLE, "START": TokenType.BEGIN, "TOP": TokenType.TOP, diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py index 8d84024..8c2a4ab 100644 --- a/sqlglot/dialects/postgres.py +++ b/sqlglot/dialects/postgres.py @@ -183,9 +183,10 @@ def _to_timestamp(args: t.List) -> exp.Expression: class Postgres(Dialect): - null_ordering = "nulls_are_large" - time_format = "'YYYY-MM-DD HH24:MI:SS'" - time_mapping = { + INDEX_OFFSET = 1 + NULL_ORDERING = "nulls_are_large" + TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'" + TIME_MAPPING = { "AM": "%p", "PM": "%p", "D": "%u", # 1-based day of week @@ -241,7 +242,6 @@ class Postgres(Dialect): "REFRESH": TokenType.COMMAND, "REINDEX": TokenType.COMMAND, "RESET": TokenType.COMMAND, - "RETURNING": TokenType.RETURNING, "REVOKE": TokenType.COMMAND, "SERIAL": TokenType.SERIAL, "SMALLSERIAL": TokenType.SMALLSERIAL, @@ -258,6 +258,7 @@ class Postgres(Dialect): class Parser(parser.Parser): STRICT_CAST = False + CONCAT_NULL_OUTPUTS_STRING = True FUNCTIONS = { **parser.Parser.FUNCTIONS, @@ -268,6 +269,7 @@ class Postgres(Dialect): "NOW": exp.CurrentTimestamp.from_arg_list, "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"), "TO_TIMESTAMP": _to_timestamp, + "UNNEST": exp.Explode.from_arg_list, } FUNCTION_PARSERS = { @@ -303,7 +305,7 @@ class Postgres(Dialect): value = self._parse_bitwise() if part and part.is_string: - part = exp.Var(this=part.name) + part = exp.var(part.name) return self.expression(exp.Extract, this=part, expression=value) @@ -328,6 +330,7 @@ class Postgres(Dialect): **generator.Generator.TRANSFORMS, exp.BitwiseXor: 
lambda self, e: self.binary(e, "#"), exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]), + exp.Explode: rename_func("UNNEST"), exp.JSONExtract: arrow_json_extract_sql, exp.JSONExtractScalar: arrow_json_extract_scalar_sql, exp.JSONBExtract: lambda self, e: self.binary(e, "#>"), diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py index d839864..a8a9884 100644 --- a/sqlglot/dialects/presto.py +++ b/sqlglot/dialects/presto.py @@ -102,7 +102,7 @@ def _str_to_time_sql( def _ts_or_ds_to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str: time_format = self.format_time(expression) - if time_format and time_format not in (Presto.time_format, Presto.date_format): + if time_format and time_format not in (Presto.TIME_FORMAT, Presto.DATE_FORMAT): return f"CAST({_str_to_time_sql(self, expression)} AS DATE)" return f"CAST(SUBSTR(CAST({self.sql(expression, 'this')} AS VARCHAR), 1, 10) AS DATE)" @@ -119,7 +119,7 @@ def _ts_or_ds_add_sql(self: generator.Generator, expression: exp.TsOrDsAdd) -> s exp.Literal.number(1), exp.Literal.number(10), ), - Presto.date_format, + Presto.DATE_FORMAT, ) return self.func( @@ -145,9 +145,7 @@ def _approx_percentile(args: t.List) -> exp.Expression: ) if len(args) == 3: return exp.ApproxQuantile( - this=seq_get(args, 0), - quantile=seq_get(args, 1), - accuracy=seq_get(args, 2), + this=seq_get(args, 0), quantile=seq_get(args, 1), accuracy=seq_get(args, 2) ) return exp.ApproxQuantile.from_arg_list(args) @@ -160,10 +158,8 @@ def _from_unixtime(args: t.List) -> exp.Expression: minutes=seq_get(args, 2), ) if len(args) == 2: - return exp.UnixToTime( - this=seq_get(args, 0), - zone=seq_get(args, 1), - ) + return exp.UnixToTime(this=seq_get(args, 0), zone=seq_get(args, 1)) + return exp.UnixToTime.from_arg_list(args) @@ -173,21 +169,17 @@ def _unnest_sequence(expression: exp.Expression) -> exp.Expression: unnest = exp.Unnest(expressions=[expression.this]) if expression.alias: - return exp.alias_( - unnest, - alias="_u", - table=[expression.alias], - copy=False, - ) + return exp.alias_(unnest, alias="_u", table=[expression.alias], copy=False) return unnest return expression class Presto(Dialect): - index_offset = 1 - null_ordering = "nulls_are_last" - time_format = MySQL.time_format - time_mapping = MySQL.time_mapping + INDEX_OFFSET = 1 + NULL_ORDERING = "nulls_are_last" + TIME_FORMAT = MySQL.TIME_FORMAT + TIME_MAPPING = MySQL.TIME_MAPPING + STRICT_STRING_CONCAT = True class Tokenizer(tokens.Tokenizer): KEYWORDS = { @@ -205,14 +197,10 @@ class Presto(Dialect): "CARDINALITY": exp.ArraySize.from_arg_list, "CONTAINS": exp.ArrayContains.from_arg_list, "DATE_ADD": lambda args: exp.DateAdd( - this=seq_get(args, 2), - expression=seq_get(args, 1), - unit=seq_get(args, 0), + this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) ), "DATE_DIFF": lambda args: exp.DateDiff( - this=seq_get(args, 2), - expression=seq_get(args, 1), - unit=seq_get(args, 0), + this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) ), "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"), "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"), @@ -225,9 +213,7 @@ class Presto(Dialect): "NOW": exp.CurrentTimestamp.from_arg_list, "SEQUENCE": exp.GenerateSeries.from_arg_list, "STRPOS": lambda args: exp.StrPosition( - this=seq_get(args, 0), - substr=seq_get(args, 1), - instance=seq_get(args, 2), + this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2) ), "TO_UNIXTIME": 
exp.TimeToUnix.from_arg_list, "TO_HEX": exp.Hex.from_arg_list, @@ -242,7 +228,7 @@ class Presto(Dialect): INTERVAL_ALLOWS_PLURAL_FORM = False JOIN_HINTS = False TABLE_HINTS = False - IS_BOOL = False + IS_BOOL_ALLOWED = False STRUCT_DELIMITER = ("(", ")") PROPERTIES_LOCATION = { @@ -284,10 +270,10 @@ class Presto(Dialect): exp.DateDiff: lambda self, e: self.func( "DATE_DIFF", exp.Literal.string(e.text("unit") or "day"), e.expression, e.this ), - exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.date_format}) AS DATE)", - exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.dateint_format}) AS INT)", + exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.DATE_FORMAT}) AS DATE)", + exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)", exp.Decode: _decode_sql, - exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.dateint_format}) AS DATE)", + exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)", exp.Encode: _encode_sql, exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'", exp.Group: transforms.preprocess([transforms.unalias_group]), @@ -322,7 +308,7 @@ class Presto(Dialect): exp.TimestampTrunc: timestamptrunc_sql, exp.TimeStrToDate: timestrtotime_sql, exp.TimeStrToTime: timestrtotime_sql, - exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.time_format}))", + exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.TIME_FORMAT}))", exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", exp.TimeToUnix: rename_func("TO_UNIXTIME"), exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]), @@ -367,8 +353,16 @@ class Presto(Dialect): to = target_type.copy() if target_type is start.to: - end = exp.Cast(this=end, to=to) + end = exp.cast(end, to) else: - start = exp.Cast(this=start, to=to) + start = exp.cast(start, to) return self.func("SEQUENCE", start, end, step) + + def offset_limit_modifiers( + self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit] + ) -> t.List[str]: + return [ + self.sql(expression, "offset"), + self.sql(limit), + ] diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py index b0a6774..a7e25fa 100644 --- a/sqlglot/dialects/redshift.py +++ b/sqlglot/dialects/redshift.py @@ -3,7 +3,7 @@ from __future__ import annotations import typing as t from sqlglot import exp, transforms -from sqlglot.dialects.dialect import rename_func +from sqlglot.dialects.dialect import concat_to_dpipe_sql, rename_func from sqlglot.dialects.postgres import Postgres from sqlglot.helper import seq_get from sqlglot.tokens import TokenType @@ -14,9 +14,9 @@ def _json_sql(self: Postgres.Generator, expression: exp.JSONExtract | exp.JSONEx class Redshift(Postgres): - time_format = "'YYYY-MM-DD HH:MI:SS'" - time_mapping = { - **Postgres.time_mapping, + TIME_FORMAT = "'YYYY-MM-DD HH:MI:SS'" + TIME_MAPPING = { + **Postgres.TIME_MAPPING, "MON": "%b", "HH": "%H", } @@ -51,7 +51,7 @@ class Redshift(Postgres): and this.expressions and this.expressions[0].this == exp.column("MAX") ): - this.set("expressions", [exp.Var(this="MAX")]) + this.set("expressions", [exp.var("MAX")]) return this @@ -94,6 +94,7 @@ class Redshift(Postgres): TRANSFORMS = { **Postgres.Generator.TRANSFORMS, + 
exp.Concat: concat_to_dpipe_sql, exp.CurrentTimestamp: lambda self, e: "SYSDATE", exp.DateAdd: lambda self, e: self.func( "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this @@ -106,6 +107,7 @@ class Redshift(Postgres): exp.FromBase: rename_func("STRTOL"), exp.JSONExtract: _json_sql, exp.JSONExtractScalar: _json_sql, + exp.SafeConcat: concat_to_dpipe_sql, exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})", exp.TsOrDsToDate: lambda self, e: self.sql(e.this), @@ -170,6 +172,6 @@ class Redshift(Postgres): precision = expression.args.get("expressions") if not precision: - expression.append("expressions", exp.Var(this="MAX")) + expression.append("expressions", exp.var("MAX")) return super().datatype_sql(expression) diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py index 821d991..148b6d8 100644 --- a/sqlglot/dialects/snowflake.py +++ b/sqlglot/dialects/snowflake.py @@ -167,10 +167,10 @@ def _parse_convert_timezone(args: t.List) -> exp.Expression: class Snowflake(Dialect): - null_ordering = "nulls_are_large" - time_format = "'yyyy-mm-dd hh24:mi:ss'" + NULL_ORDERING = "nulls_are_large" + TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'" - time_mapping = { + TIME_MAPPING = { "YYYY": "%Y", "yyyy": "%Y", "YY": "%y", @@ -210,14 +210,10 @@ class Snowflake(Dialect): "CONVERT_TIMEZONE": _parse_convert_timezone, "DATE_TRUNC": date_trunc_to_time, "DATEADD": lambda args: exp.DateAdd( - this=seq_get(args, 2), - expression=seq_get(args, 1), - unit=seq_get(args, 0), + this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) ), "DATEDIFF": lambda args: exp.DateDiff( - this=seq_get(args, 2), - expression=seq_get(args, 1), - unit=seq_get(args, 0), + this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) ), "DIV0": _div0_to_if, "IFF": exp.If.from_arg_list, @@ -246,9 +242,7 @@ class Snowflake(Dialect): COLUMN_OPERATORS = { **parser.Parser.COLUMN_OPERATORS, TokenType.COLON: lambda self, this, path: self.expression( - exp.Bracket, - this=this, - expressions=[path], + exp.Bracket, this=this, expressions=[path] ), } @@ -275,6 +269,7 @@ class Snowflake(Dialect): QUOTES = ["'", "$$"] STRING_ESCAPES = ["\\", "'"] HEX_STRINGS = [("x'", "'"), ("X'", "'")] + COMMENTS = ["--", "//", ("/*", "*/")] KEYWORDS = { **tokens.Tokenizer.KEYWORDS, diff --git a/sqlglot/dialects/spark2.py b/sqlglot/dialects/spark2.py index bf24240..ed6992d 100644 --- a/sqlglot/dialects/spark2.py +++ b/sqlglot/dialects/spark2.py @@ -38,7 +38,7 @@ def _parse_as_cast(to_type: str) -> t.Callable[[t.List], exp.Expression]: def _str_to_date(self: Hive.Generator, expression: exp.StrToDate) -> str: this = self.sql(expression, "this") time_format = self.format_time(expression) - if time_format == Hive.date_format: + if time_format == Hive.DATE_FORMAT: return f"TO_DATE({this})" return f"TO_DATE({this}, {time_format})" @@ -133,13 +133,13 @@ class Spark2(Hive): "WEEKOFYEAR": lambda args: exp.WeekOfYear( this=exp.TsOrDsToDate(this=seq_get(args, 0)), ), - "DATE": lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build("date")), "DATE_TRUNC": lambda args: exp.TimestampTrunc( this=seq_get(args, 1), unit=exp.var(seq_get(args, 0)), ), "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), "BOOLEAN": _parse_as_cast("boolean"), + "DATE": _parse_as_cast("date"), "DOUBLE": _parse_as_cast("double"), "FLOAT": _parse_as_cast("float"), 
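# --- Editorial example, not part of the upstream patch ---------------------------
# With "DATE" now routed through _parse_as_cast("date") (see the Spark2 FUNCTIONS
# entries in this hunk), DATE(x) parses into a plain exp.Cast. A minimal sketch,
# assuming the default generator renders that cast as CAST(... AS DATE):
#
#   import sqlglot
#   sqlglot.transpile("SELECT DATE('2020-01-01')", read="spark")
#   # expected output, roughly: ["SELECT CAST('2020-01-01' AS DATE)"]
# ----------------------------------------------------------------------------------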
"INT": _parse_as_cast("int"), @@ -162,11 +162,9 @@ class Spark2(Hive): def _parse_add_column(self) -> t.Optional[exp.Expression]: return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema() - def _parse_drop_column(self) -> t.Optional[exp.Expression]: + def _parse_drop_column(self) -> t.Optional[exp.Drop | exp.Command]: return self._match_text_seq("DROP", "COLUMNS") and self.expression( - exp.Drop, - this=self._parse_schema(), - kind="COLUMNS", + exp.Drop, this=self._parse_schema(), kind="COLUMNS" ) def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: diff --git a/sqlglot/dialects/sqlite.py b/sqlglot/dialects/sqlite.py index 4e800b0..3b837ea 100644 --- a/sqlglot/dialects/sqlite.py +++ b/sqlglot/dialects/sqlite.py @@ -5,6 +5,7 @@ from sqlglot.dialects.dialect import ( Dialect, arrow_json_extract_scalar_sql, arrow_json_extract_sql, + concat_to_dpipe_sql, count_if_to_sum, no_ilike_sql, no_pivot_sql, @@ -62,10 +63,6 @@ class SQLite(Dialect): IDENTIFIERS = ['"', ("[", "]"), "`"] HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", ""), ("0X", "")] - KEYWORDS = { - **tokens.Tokenizer.KEYWORDS, - } - class Parser(parser.Parser): FUNCTIONS = { **parser.Parser.FUNCTIONS, @@ -100,6 +97,7 @@ class SQLite(Dialect): TRANSFORMS = { **generator.Generator.TRANSFORMS, + exp.Concat: concat_to_dpipe_sql, exp.CountIf: count_if_to_sum, exp.Create: transforms.preprocess([_transform_create]), exp.CurrentDate: lambda *_: "CURRENT_DATE", @@ -116,6 +114,7 @@ class SQLite(Dialect): exp.LogicalOr: rename_func("MAX"), exp.LogicalAnd: rename_func("MIN"), exp.Pivot: no_pivot_sql, + exp.SafeConcat: concat_to_dpipe_sql, exp.Select: transforms.preprocess( [transforms.eliminate_distinct_on, transforms.eliminate_qualify] ), diff --git a/sqlglot/dialects/tableau.py b/sqlglot/dialects/tableau.py index d5fba17..67ef76b 100644 --- a/sqlglot/dialects/tableau.py +++ b/sqlglot/dialects/tableau.py @@ -1,7 +1,7 @@ from __future__ import annotations from sqlglot import exp, generator, parser, transforms -from sqlglot.dialects.dialect import Dialect +from sqlglot.dialects.dialect import Dialect, rename_func class Tableau(Dialect): @@ -11,6 +11,7 @@ class Tableau(Dialect): TRANSFORMS = { **generator.Generator.TRANSFORMS, + exp.Coalesce: rename_func("IFNULL"), exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), } @@ -25,9 +26,6 @@ class Tableau(Dialect): false = self.sql(expression, "false") return f"IF {this} THEN {true} ELSE {false} END" - def coalesce_sql(self, expression: exp.Coalesce) -> str: - return f"IFNULL({self.sql(expression, 'this')}, {self.expressions(expression)})" - def count_sql(self, expression: exp.Count) -> str: this = expression.this if isinstance(this, exp.Distinct): diff --git a/sqlglot/dialects/teradata.py b/sqlglot/dialects/teradata.py index 514aecb..d5e5dd8 100644 --- a/sqlglot/dialects/teradata.py +++ b/sqlglot/dialects/teradata.py @@ -1,18 +1,32 @@ from __future__ import annotations -import typing as t - from sqlglot import exp, generator, parser, tokens, transforms -from sqlglot.dialects.dialect import ( - Dialect, - format_time_lambda, - max_or_greatest, - min_or_least, -) +from sqlglot.dialects.dialect import Dialect, max_or_greatest, min_or_least from sqlglot.tokens import TokenType class Teradata(Dialect): + TIME_MAPPING = { + "Y": "%Y", + "YYYY": "%Y", + "YY": "%y", + "MMMM": "%B", + "MMM": "%b", + "DD": "%d", + "D": "%-d", + "HH": "%H", + "H": "%-H", + "MM": "%M", + "M": "%-M", + "SS": "%S", + "S": "%-S", + "SSSSSS": "%f", + "E": "%a", + "EE": "%a", 
+ "EEE": "%a", + "EEEE": "%A", + } + class Tokenizer(tokens.Tokenizer): # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance KEYWORDS = { @@ -31,7 +45,7 @@ class Teradata(Dialect): "ST_GEOMETRY": TokenType.GEOMETRY, } - # teradata does not support % for modulus + # Teradata does not support % as a modulo operator SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS} SINGLE_TOKENS.pop("%") @@ -101,7 +115,7 @@ class Teradata(Dialect): # FROM before SET in Teradata UPDATE syntax # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause - def _parse_update(self) -> exp.Expression: + def _parse_update(self) -> exp.Update: return self.expression( exp.Update, **{ # type: ignore @@ -122,14 +136,6 @@ class Teradata(Dialect): return self.expression(exp.RangeN, this=this, expressions=expressions, each=each) - def _parse_cast(self, strict: bool) -> exp.Expression: - cast = t.cast(exp.Cast, super()._parse_cast(strict)) - if cast.to.this == exp.DataType.Type.DATE and self._match(TokenType.FORMAT): - return format_time_lambda(exp.TimeToStr, "teradata")( - [cast.this, self._parse_string()] - ) - return cast - class Generator(generator.Generator): JOIN_HINTS = False TABLE_HINTS = False @@ -151,7 +157,7 @@ class Teradata(Dialect): exp.Max: max_or_greatest, exp.Min: min_or_least, exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]), - exp.TimeToStr: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})", + exp.StrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})", exp.ToChar: lambda self, e: self.function_fallback_sql(e), } diff --git a/sqlglot/dialects/tsql.py b/sqlglot/dialects/tsql.py index f6ad888..6d674f5 100644 --- a/sqlglot/dialects/tsql.py +++ b/sqlglot/dialects/tsql.py @@ -64,9 +64,9 @@ def _format_time_lambda( format=exp.Literal.string( format_time( args[0].name, - {**TSQL.time_mapping, **FULL_FORMAT_TIME_MAPPING} + {**TSQL.TIME_MAPPING, **FULL_FORMAT_TIME_MAPPING} if full_format_mapping - else TSQL.time_mapping, + else TSQL.TIME_MAPPING, ) ), ) @@ -86,9 +86,9 @@ def _parse_format(args: t.List) -> exp.Expression: return exp.TimeToStr( this=args[0], format=exp.Literal.string( - format_time(fmt.name, TSQL.format_time_mapping) + format_time(fmt.name, TSQL.FORMAT_TIME_MAPPING) if len(fmt.name) == 1 - else format_time(fmt.name, TSQL.time_mapping) + else format_time(fmt.name, TSQL.TIME_MAPPING) ), ) @@ -138,7 +138,7 @@ def _format_sql(self: generator.Generator, expression: exp.NumberToStr | exp.Tim if isinstance(expression, exp.NumberToStr) else exp.Literal.string( format_time( - expression.text("format"), t.cast(t.Dict[str, str], TSQL.inverse_time_mapping) + expression.text("format"), t.cast(t.Dict[str, str], TSQL.INVERSE_TIME_MAPPING) ) ) ) @@ -166,10 +166,10 @@ def _string_agg_sql(self: generator.Generator, expression: exp.GroupConcat) -> s class TSQL(Dialect): - null_ordering = "nulls_are_small" - time_format = "'yyyy-mm-dd hh:mm:ss'" + NULL_ORDERING = "nulls_are_small" + TIME_FORMAT = "'yyyy-mm-dd hh:mm:ss'" - time_mapping = { + TIME_MAPPING = { "year": "%Y", "qq": "%q", "q": "%q", @@ -213,7 +213,7 @@ class TSQL(Dialect): "yy": "%y", } - convert_format_mapping = { + CONVERT_FORMAT_MAPPING = { "0": "%b %d %Y %-I:%M%p", "1": "%m/%d/%y", "2": "%y.%m.%d", @@ -253,8 +253,8 @@ 
class TSQL(Dialect): "120": "%Y-%m-%d %H:%M:%S", "121": "%Y-%m-%d %H:%M:%S.%f", } - # not sure if complete - format_time_mapping = { + + FORMAT_TIME_MAPPING = { "y": "%B %Y", "d": "%m/%d/%Y", "H": "%-H", @@ -312,9 +312,7 @@ class TSQL(Dialect): FUNCTIONS = { **parser.Parser.FUNCTIONS, "CHARINDEX": lambda args: exp.StrPosition( - this=seq_get(args, 1), - substr=seq_get(args, 0), - position=seq_get(args, 2), + this=seq_get(args, 1), substr=seq_get(args, 0), position=seq_get(args, 2) ), "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL), "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL), @@ -363,6 +361,8 @@ class TSQL(Dialect): LOG_BASE_FIRST = False LOG_DEFAULTS_TO_LN = True + CONCAT_NULL_OUTPUTS_STRING = True + def _parse_system_time(self) -> t.Optional[exp.Expression]: if not self._match_text_seq("FOR", "SYSTEM_TIME"): return None @@ -400,7 +400,7 @@ class TSQL(Dialect): table.set("system_time", self._parse_system_time()) return table - def _parse_returns(self) -> exp.Expression: + def _parse_returns(self) -> exp.ReturnsProperty: table = self._parse_id_var(any_token=False, tokens=self.RETURNS_TABLE_TOKENS) returns = super()._parse_returns() returns.set("table", table) @@ -423,12 +423,12 @@ class TSQL(Dialect): format_val = self._parse_number() format_val_name = format_val.name if format_val else "" - if format_val_name not in TSQL.convert_format_mapping: + if format_val_name not in TSQL.CONVERT_FORMAT_MAPPING: raise ValueError( f"CONVERT function at T-SQL does not support format style {format_val_name}" ) - format_norm = exp.Literal.string(TSQL.convert_format_mapping[format_val_name]) + format_norm = exp.Literal.string(TSQL.CONVERT_FORMAT_MAPPING[format_val_name]) # Check whether the convert entails a string to date format if to.this == DataType.Type.DATE: diff --git a/sqlglot/executor/env.py b/sqlglot/executor/env.py index 51cffbd..d2c4e72 100644 --- a/sqlglot/executor/env.py +++ b/sqlglot/executor/env.py @@ -151,6 +151,7 @@ ENV = { "CAST": cast, "COALESCE": lambda *args: next((a for a in args if a is not None), None), "CONCAT": null_if_any(lambda *args: "".join(args)), + "SAFECONCAT": null_if_any(lambda *args: "".join(str(arg) for arg in args)), "CONCATWS": null_if_any(lambda this, *args: this.join(args)), "DATESTRTODATE": null_if_any(lambda arg: datetime.date.fromisoformat(arg)), "DIV": null_if_any(lambda e, this: e / this), @@ -159,7 +160,6 @@ ENV = { "EXTRACT": null_if_any(lambda this, e: getattr(e, this)), "GT": null_if_any(lambda this, e: this > e), "GTE": null_if_any(lambda this, e: this >= e), - "IFNULL": lambda e, alt: alt if e is None else e, "IF": lambda predicate, true, false: true if predicate else false, "INTDIV": null_if_any(lambda e, this: e // this), "INTERVAL": interval, diff --git a/sqlglot/executor/python.py b/sqlglot/executor/python.py index f114e5c..3f96f90 100644 --- a/sqlglot/executor/python.py +++ b/sqlglot/executor/python.py @@ -394,7 +394,7 @@ def _lambda_sql(self, e: exp.Lambda) -> str: names = {e.name.lower() for e in e.expressions} e = e.transform( - lambda n: exp.Var(this=n.name) + lambda n: exp.var(n.name) if isinstance(n, exp.Identifier) and n.name.lower() in names else n ) diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index da4a4ed..c7d4664 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -1500,6 +1500,7 @@ class Index(Expression): arg_types = { "this": False, "table": False, + "using": False, "where": False, "columns": False, "unique": False, @@ -1623,7 +1624,7 @@ class 
Lambda(Expression): class Limit(Expression): - arg_types = {"this": False, "expression": True} + arg_types = {"this": False, "expression": True, "offset": False} class Literal(Condition): @@ -1869,6 +1870,10 @@ class EngineProperty(Property): arg_types = {"this": True} +class ToTableProperty(Property): + arg_types = {"this": True} + + class ExecuteAsProperty(Property): arg_types = {"this": True} @@ -3072,12 +3077,35 @@ class Select(Subqueryable): Returns: The modified expression. """ - inst = _maybe_copy(self, copy) inst.set("locks", [Lock(update=update)]) return inst + def hint(self, *hints: ExpOrStr, dialect: DialectType = None, copy: bool = True) -> Select: + """ + Set hints for this expression. + + Examples: + >>> Select().select("x").from_("tbl").hint("BROADCAST(y)").sql(dialect="spark") + 'SELECT /*+ BROADCAST(y) */ x FROM tbl' + + Args: + hints: The SQL code strings to parse as the hints. + If an `Expression` instance is passed, it will be used as-is. + dialect: The dialect used to parse the hints. + copy: If `False`, modify this expression instance in-place. + + Returns: + The modified expression. + """ + inst = _maybe_copy(self, copy) + inst.set( + "hint", Hint(expressions=[maybe_parse(h, copy=copy, dialect=dialect) for h in hints]) + ) + + return inst + @property def named_selects(self) -> t.List[str]: return [e.output_name for e in self.expressions if e.alias_or_name] @@ -3244,6 +3272,7 @@ class DataType(Expression): DATE = auto() DATETIME = auto() DATETIME64 = auto() + ENUM = auto() INT4RANGE = auto() INT4MULTIRANGE = auto() INT8RANGE = auto() @@ -3284,6 +3313,7 @@ class DataType(Expression): OBJECT = auto() ROWVERSION = auto() SERIAL = auto() + SET = auto() SMALLINT = auto() SMALLMONEY = auto() SMALLSERIAL = auto() @@ -3334,6 +3364,7 @@ class DataType(Expression): NUMERIC_TYPES = {*INTEGER_TYPES, *FLOAT_TYPES} TEMPORAL_TYPES = { + Type.TIME, Type.TIMESTAMP, Type.TIMESTAMPTZ, Type.TIMESTAMPLTZ, @@ -3342,6 +3373,8 @@ class DataType(Expression): Type.DATETIME64, } + META_TYPES = {"UNKNOWN", "NULL"} + @classmethod def build( cls, dtype: str | DataType | DataType.Type, dialect: DialectType = None, **kwargs @@ -3349,8 +3382,9 @@ class DataType(Expression): from sqlglot import parse_one if isinstance(dtype, str): - if dtype.upper() in cls.Type.__members__: - data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type[dtype.upper()]) + upper = dtype.upper() + if upper in DataType.META_TYPES: + data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type[upper]) else: data_type_exp = parse_one(dtype, read=dialect, into=DataType) @@ -3483,6 +3517,10 @@ class Dot(Binary): def name(self) -> str: return self.expression.name + @property + def output_name(self) -> str: + return self.name + @classmethod def build(self, expressions: t.Sequence[Expression]) -> Dot: """Build a Dot object with a sequence of expressions.""" @@ -3502,6 +3540,10 @@ class DPipe(Binary): pass +class SafeDPipe(DPipe): + pass + + class EQ(Binary, Predicate): pass @@ -3615,6 +3657,10 @@ class Not(Unary): class Paren(Unary): arg_types = {"this": True, "with": False} + @property + def output_name(self) -> str: + return self.this.name + class Neg(Unary): pass @@ -3904,6 +3950,7 @@ class Ceil(Func): class Coalesce(Func): arg_types = {"this": True, "expressions": False} is_var_len_args = True + _sql_names = ["COALESCE", "IFNULL", "NVL"] class Concat(Func): @@ -3911,12 +3958,17 @@ class Concat(Func): is_var_len_args = True +class SafeConcat(Concat): + pass + + class ConcatWs(Concat): _sql_names = ["CONCAT_WS"] 
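# --- Editorial example, not part of the upstream patch ---------------------------
# IFNULL and NVL now parse into exp.Coalesce via its _sql_names (the standalone
# exp.IfNull class is removed further down), and dialect transforms pick the output
# spelling, e.g. NVL for Oracle in this patch. A minimal sketch of that behaviour:
#
#   import sqlglot
#   from sqlglot import exp
#
#   assert isinstance(sqlglot.parse_one("IFNULL(a, b)"), exp.Coalesce)
#   sqlglot.transpile("SELECT COALESCE(a, b)", write="oracle")
#   # expected output, roughly: ['SELECT NVL(a, b)']
# ----------------------------------------------------------------------------------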
class Count(AggFunc): - arg_types = {"this": False} + arg_types = {"this": False, "expressions": False} + is_var_len_args = True class CountIf(AggFunc): @@ -4049,6 +4101,11 @@ class DateToDi(Func): pass +class Date(Func): + arg_types = {"expressions": True} + is_var_len_args = True + + class Day(Func): pass @@ -4102,11 +4159,6 @@ class If(Func): arg_types = {"this": True, "true": True, "false": False} -class IfNull(Func): - arg_types = {"this": True, "expression": False} - _sql_names = ["IFNULL", "NVL"] - - class Initcap(Func): arg_types = {"this": True, "expression": False} @@ -5608,22 +5660,27 @@ def replace_children(expression: Expression, fun: t.Callable, *args, **kwargs) - expression.args[k] = new_child_nodes if is_list_arg else seq_get(new_child_nodes, 0) -def column_table_names(expression: Expression) -> t.List[str]: +def column_table_names(expression: Expression, exclude: str = "") -> t.Set[str]: """ Return all table names referenced through columns in an expression. Example: >>> import sqlglot - >>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e")) - ['c', 'a'] + >>> sorted(column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e"))) + ['a', 'c'] Args: expression: expression to find table names. + exclude: a table name to exclude Returns: A list of unique names. """ - return list(dict.fromkeys(column.table for column in expression.find_all(Column))) + return { + table + for table in (column.table for column in expression.find_all(Column)) + if table and table != exclude + } def table_name(table: Table | str) -> str: @@ -5649,12 +5706,13 @@ def table_name(table: Table | str) -> str: return ".".join(part for part in (table.text("catalog"), table.text("db"), table.name) if part) -def replace_tables(expression: E, mapping: t.Dict[str, str]) -> E: +def replace_tables(expression: E, mapping: t.Dict[str, str], copy: bool = True) -> E: """Replace all tables in expression according to the mapping. Args: expression: expression node to be transformed and replaced. mapping: mapping of table names. + copy: whether or not to copy the expression. Examples: >>> from sqlglot import exp, parse_one @@ -5675,7 +5733,7 @@ def replace_tables(expression: E, mapping: t.Dict[str, str]) -> E: ) return node - return expression.transform(_replace_tables) + return expression.transform(_replace_tables, copy=copy) def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression: diff --git a/sqlglot/generator.py b/sqlglot/generator.py index 97cbe15..d3cf9f0 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -14,47 +14,32 @@ logger = logging.getLogger("sqlglot") class Generator: """ - Generator interprets the given syntax tree and produces a SQL string as an output. + Generator converts a given syntax tree to the corresponding SQL string. Args: - time_mapping (dict): the dictionary of custom time mappings in which the key - represents a python time format and the output the target time format - time_trie (trie): a trie of the time_mapping keys - pretty (bool): if set to True the returned string will be formatted. Default: False. - quote_start (str): specifies which starting character to use to delimit quotes. Default: '. - quote_end (str): specifies which ending character to use to delimit quotes. Default: '. - identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ". - identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ". 
- bit_start (str): specifies which starting character to use to delimit bit literals. Default: None. - bit_end (str): specifies which ending character to use to delimit bit literals. Default: None. - hex_start (str): specifies which starting character to use to delimit hex literals. Default: None. - hex_end (str): specifies which ending character to use to delimit hex literals. Default: None. - byte_start (str): specifies which starting character to use to delimit byte literals. Default: None. - byte_end (str): specifies which ending character to use to delimit byte literals. Default: None. - raw_start (str): specifies which starting character to use to delimit raw literals. Default: None. - raw_end (str): specifies which ending character to use to delimit raw literals. Default: None. - identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always. - normalize (bool): if set to True all identifiers will lower cased - string_escape (str): specifies a string escape character. Default: '. - identifier_escape (str): specifies an identifier escape character. Default: ". - pad (int): determines padding in a formatted string. Default: 2. - indent (int): determines the size of indentation in a formatted string. Default: 4. - unnest_column_only (bool): if true unnest table aliases are considered only as column aliases - normalize_functions (str): normalize function names, "upper", "lower", or None - Default: "upper" - alias_post_tablesample (bool): if the table alias comes after tablesample - Default: False - identifiers_can_start_with_digit (bool): if an unquoted identifier can start with digit - Default: False - unsupported_level (ErrorLevel): determines the generator's behavior when it encounters - unsupported expressions. Default ErrorLevel.WARN. - null_ordering (str): Indicates the default null ordering method to use if not explicitly set. - Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". - Default: "nulls_are_small" - max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. + pretty: Whether or not to format the produced SQL string. + Default: False. + identify: Determines when an identifier should be quoted. Possible values are: + False (default): Never quote, except in cases where it's mandatory by the dialect. + True or 'always': Always quote. + 'safe': Only quote identifiers that are case insensitive. + normalize: Whether or not to normalize identifiers to lowercase. + Default: False. + pad: Determines the pad size in a formatted string. + Default: 2. + indent: Determines the indentation size in a formatted string. + Default: 2. + normalize_functions: Whether or not to normalize all function names. Possible values are: + "upper" or True (default): Convert names to uppercase. + "lower": Convert names to lowercase. + False: Disables function name normalization. + unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. + Default ErrorLevel.WARN. + max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3 - leading_comma (bool): if the the comma is leading or trailing in select statements + leading_comma: Determines whether or not the comma is leading or trailing in select expressions. + This is only relevant when generating in pretty mode. 
Default: False max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true @@ -86,6 +71,7 @@ class Generator: exp.SettingsProperty: lambda self, e: f"SETTINGS{self.seg('')}{(self.expressions(e))}", exp.SqlSecurityProperty: lambda self, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}", exp.TemporaryProperty: lambda self, e: f"TEMPORARY", + exp.ToTableProperty: lambda self, e: f"TO {self.sql(e.this)}", exp.TransientProperty: lambda self, e: "TRANSIENT", exp.StabilityProperty: lambda self, e: e.name, exp.VolatileProperty: lambda self, e: "VOLATILE", @@ -138,15 +124,24 @@ class Generator: # Whether or not limit and fetch are supported (possible values: "ALL", "LIMIT", "FETCH") LIMIT_FETCH = "ALL" - # Whether a table is allowed to be renamed with a db + # Whether or not a table is allowed to be renamed with a db RENAME_TABLE_WITH_DB = True # The separator for grouping sets and rollups GROUPINGS_SEP = "," - # The string used for creating index on a table + # The string used for creating an index on a table INDEX_ON = "ON" + # Whether or not join hints should be generated + JOIN_HINTS = True + + # Whether or not table hints should be generated + TABLE_HINTS = True + + # Whether or not comparing against booleans (e.g. x IS TRUE) is supported + IS_BOOL_ALLOWED = True + TYPE_MAPPING = { exp.DataType.Type.NCHAR: "CHAR", exp.DataType.Type.NVARCHAR: "VARCHAR", @@ -228,6 +223,7 @@ class Generator: exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE, exp.StabilityProperty: exp.Properties.Location.POST_SCHEMA, exp.TemporaryProperty: exp.Properties.Location.POST_CREATE, + exp.ToTableProperty: exp.Properties.Location.POST_SCHEMA, exp.TransientProperty: exp.Properties.Location.POST_CREATE, exp.MergeTreeTTL: exp.Properties.Location.POST_SCHEMA, exp.VolatileProperty: exp.Properties.Location.POST_CREATE, @@ -235,128 +231,110 @@ class Generator: exp.WithJournalTableProperty: exp.Properties.Location.POST_NAME, } - JOIN_HINTS = True - TABLE_HINTS = True - IS_BOOL = True - + # Keywords that can't be used as unquoted identifier names RESERVED_KEYWORDS: t.Set[str] = set() - WITH_SEPARATED_COMMENTS = (exp.Select, exp.From, exp.Where, exp.With) - UNWRAPPED_INTERVAL_VALUES = (exp.Column, exp.Literal, exp.Neg, exp.Paren) + + # Expressions whose comments are separated from them for better formatting + WITH_SEPARATED_COMMENTS: t.Tuple[t.Type[exp.Expression], ...] = ( + exp.Select, + exp.From, + exp.Where, + exp.With, + ) + + # Expressions that can remain unwrapped when appearing in the context of an INTERVAL + UNWRAPPED_INTERVAL_VALUES: t.Tuple[t.Type[exp.Expression], ...] 
= ( + exp.Column, + exp.Literal, + exp.Neg, + exp.Paren, + ) SENTINEL_LINE_BREAK = "__SQLGLOT__LB__" + # Autofilled + INVERSE_TIME_MAPPING: t.Dict[str, str] = {} + INVERSE_TIME_TRIE: t.Dict = {} + INDEX_OFFSET = 0 + UNNEST_COLUMN_ONLY = False + ALIAS_POST_TABLESAMPLE = False + IDENTIFIERS_CAN_START_WITH_DIGIT = False + STRICT_STRING_CONCAT = False + NORMALIZE_FUNCTIONS: bool | str = "upper" + NULL_ORDERING = "nulls_are_small" + + # Delimiters for quotes, identifiers and the corresponding escape characters + QUOTE_START = "'" + QUOTE_END = "'" + IDENTIFIER_START = '"' + IDENTIFIER_END = '"' + STRING_ESCAPE = "'" + IDENTIFIER_ESCAPE = '"' + + # Delimiters for bit, hex, byte and raw literals + BIT_START: t.Optional[str] = None + BIT_END: t.Optional[str] = None + HEX_START: t.Optional[str] = None + HEX_END: t.Optional[str] = None + BYTE_START: t.Optional[str] = None + BYTE_END: t.Optional[str] = None + RAW_START: t.Optional[str] = None + RAW_END: t.Optional[str] = None + __slots__ = ( - "time_mapping", - "time_trie", "pretty", - "quote_start", - "quote_end", - "identifier_start", - "identifier_end", - "bit_start", - "bit_end", - "hex_start", - "hex_end", - "byte_start", - "byte_end", - "raw_start", - "raw_end", "identify", "normalize", - "string_escape", - "identifier_escape", "pad", - "index_offset", - "unnest_column_only", - "alias_post_tablesample", - "identifiers_can_start_with_digit", + "_indent", "normalize_functions", "unsupported_level", - "unsupported_messages", - "null_ordering", "max_unsupported", - "_indent", + "leading_comma", + "max_text_width", + "comments", + "unsupported_messages", "_escaped_quote_end", "_escaped_identifier_end", - "_leading_comma", - "_max_text_width", - "_comments", "_cache", ) def __init__( self, - time_mapping=None, - time_trie=None, - pretty=None, - quote_start=None, - quote_end=None, - identifier_start=None, - identifier_end=None, - bit_start=None, - bit_end=None, - hex_start=None, - hex_end=None, - byte_start=None, - byte_end=None, - raw_start=None, - raw_end=None, - identify=False, - normalize=False, - string_escape=None, - identifier_escape=None, - pad=2, - indent=2, - index_offset=0, - unnest_column_only=False, - alias_post_tablesample=False, - identifiers_can_start_with_digit=False, - normalize_functions="upper", - unsupported_level=ErrorLevel.WARN, - null_ordering=None, - max_unsupported=3, - leading_comma=False, - max_text_width=80, - comments=True, + pretty: t.Optional[bool] = None, + identify: str | bool = False, + normalize: bool = False, + pad: int = 2, + indent: int = 2, + normalize_functions: t.Optional[str | bool] = None, + unsupported_level: ErrorLevel = ErrorLevel.WARN, + max_unsupported: int = 3, + leading_comma: bool = False, + max_text_width: int = 80, + comments: bool = True, ): import sqlglot - self.time_mapping = time_mapping or {} - self.time_trie = time_trie self.pretty = pretty if pretty is not None else sqlglot.pretty - self.quote_start = quote_start or "'" - self.quote_end = quote_end or "'" - self.identifier_start = identifier_start or '"' - self.identifier_end = identifier_end or '"' - self.bit_start = bit_start - self.bit_end = bit_end - self.hex_start = hex_start - self.hex_end = hex_end - self.byte_start = byte_start - self.byte_end = byte_end - self.raw_start = raw_start - self.raw_end = raw_end self.identify = identify self.normalize = normalize - self.string_escape = string_escape or "'" - self.identifier_escape = identifier_escape or '"' self.pad = pad - self.index_offset = index_offset - self.unnest_column_only = 
unnest_column_only - self.alias_post_tablesample = alias_post_tablesample - self.identifiers_can_start_with_digit = identifiers_can_start_with_digit - self.normalize_functions = normalize_functions + self._indent = indent self.unsupported_level = unsupported_level - self.unsupported_messages = [] self.max_unsupported = max_unsupported - self.null_ordering = null_ordering - self._indent = indent - self._escaped_quote_end = self.string_escape + self.quote_end - self._escaped_identifier_end = self.identifier_escape + self.identifier_end - self._leading_comma = leading_comma - self._max_text_width = max_text_width - self._comments = comments - self._cache = None + self.leading_comma = leading_comma + self.max_text_width = max_text_width + self.comments = comments + + # This is both a Dialect property and a Generator argument, so we prioritize the latter + self.normalize_functions = ( + self.NORMALIZE_FUNCTIONS if normalize_functions is None else normalize_functions + ) + + self.unsupported_messages: t.List[str] = [] + self._escaped_quote_end: str = self.STRING_ESCAPE + self.QUOTE_END + self._escaped_identifier_end: str = self.IDENTIFIER_ESCAPE + self.IDENTIFIER_END + self._cache: t.Optional[t.Dict[int, str]] = None def generate( self, @@ -364,17 +342,19 @@ class Generator: cache: t.Optional[t.Dict[int, str]] = None, ) -> str: """ - Generates a SQL string by interpreting the given syntax tree. + Generates the SQL string corresponding to the given syntax tree. - Args - expression: the syntax tree. - cache: an optional sql string cache. this leverages the hash of an expression which is slow, so only use this if you set _hash on each node. + Args: + expression: The syntax tree. + cache: An optional sql string cache. This leverages the hash of an Expression + which can be slow to compute, so only use it if you set _hash on each node. - Returns - the SQL string. + Returns: + The SQL string corresponding to `expression`. """ if cache is not None: self._cache = cache + self.unsupported_messages = [] sql = self.sql(expression).strip() self._cache = None @@ -414,7 +394,11 @@ class Generator: expression: t.Optional[exp.Expression] = None, comments: t.Optional[t.List[str]] = None, ) -> str: - comments = ((expression and expression.comments) if comments is None else comments) if self._comments else None # type: ignore + comments = ( + ((expression and expression.comments) if comments is None else comments) # type: ignore + if self.comments + else None + ) if not comments or isinstance(expression, exp.Binary): return sql @@ -454,7 +438,7 @@ class Generator: return result def normalize_func(self, name: str) -> str: - if self.normalize_functions == "upper": + if self.normalize_functions == "upper" or self.normalize_functions is True: return name.upper() if self.normalize_functions == "lower": return name.lower() @@ -522,7 +506,7 @@ class Generator: else: raise ValueError(f"Expected an Expression. 
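# --- Editorial example, not part of the upstream patch ---------------------------
# With the generator's dialect settings promoted to class-level constants, per-call
# behaviour comes from the constructor arguments documented above (pretty, identify,
# normalize_functions, comments, ...), which sqlglot.transpile() forwards as keyword
# options. A minimal sketch, assuming the usual option forwarding:
#
#   import sqlglot
#   sqlglot.transpile("SELECT a FROM t", identify=True)       # ~ ['SELECT "a" FROM "t"']
#   sqlglot.transpile("SELECT 1 /* note */", comments=False)  # ~ ['SELECT 1']
# ----------------------------------------------------------------------------------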
Received {type(expression)}: {expression}") - sql = self.maybe_comment(sql, expression) if self._comments and comment else sql + sql = self.maybe_comment(sql, expression) if self.comments and comment else sql if self._cache is not None: self._cache[expression_id] = sql @@ -770,25 +754,25 @@ class Generator: def bitstring_sql(self, expression: exp.BitString) -> str: this = self.sql(expression, "this") - if self.bit_start: - return f"{self.bit_start}{this}{self.bit_end}" + if self.BIT_START: + return f"{self.BIT_START}{this}{self.BIT_END}" return f"{int(this, 2)}" def hexstring_sql(self, expression: exp.HexString) -> str: this = self.sql(expression, "this") - if self.hex_start: - return f"{self.hex_start}{this}{self.hex_end}" + if self.HEX_START: + return f"{self.HEX_START}{this}{self.HEX_END}" return f"{int(this, 16)}" def bytestring_sql(self, expression: exp.ByteString) -> str: this = self.sql(expression, "this") - if self.byte_start: - return f"{self.byte_start}{this}{self.byte_end}" + if self.BYTE_START: + return f"{self.BYTE_START}{this}{self.BYTE_END}" return this def rawstring_sql(self, expression: exp.RawString) -> str: - if self.raw_start: - return f"{self.raw_start}{expression.name}{self.raw_end}" + if self.RAW_START: + return f"{self.RAW_START}{expression.name}{self.RAW_END}" return self.sql(exp.Literal.string(expression.name.replace("\\", "\\\\"))) def datatypesize_sql(self, expression: exp.DataTypeSize) -> str: @@ -883,24 +867,27 @@ class Generator: name = f"{expression.name} " if expression.name else "" table = self.sql(expression, "table") table = f"{self.INDEX_ON} {table} " if table else "" + using = self.sql(expression, "using") + using = f"USING {using} " if using else "" index = "INDEX " if not table else "" columns = self.expressions(expression, key="columns", flat=True) + columns = f"({columns})" if columns else "" partition_by = self.expressions(expression, key="partition_by", flat=True) partition_by = f" PARTITION BY {partition_by}" if partition_by else "" - return f"{unique}{primary}{amp}{index}{name}{table}({columns}){partition_by}" + return f"{unique}{primary}{amp}{index}{name}{table}{using}{columns}{partition_by}" def identifier_sql(self, expression: exp.Identifier) -> str: text = expression.name lower = text.lower() text = lower if self.normalize and not expression.quoted else text - text = text.replace(self.identifier_end, self._escaped_identifier_end) + text = text.replace(self.IDENTIFIER_END, self._escaped_identifier_end) if ( expression.quoted or should_identify(text, self.identify) or lower in self.RESERVED_KEYWORDS - or (not self.identifiers_can_start_with_digit and text[:1].isdigit()) + or (not self.IDENTIFIERS_CAN_START_WITH_DIGIT and text[:1].isdigit()) ): - text = f"{self.identifier_start}{text}{self.identifier_end}" + text = f"{self.IDENTIFIER_START}{text}{self.IDENTIFIER_END}" return text def inputoutputformat_sql(self, expression: exp.InputOutputFormat) -> str: @@ -1197,7 +1184,7 @@ class Generator: def tablesample_sql( self, expression: exp.TableSample, seed_prefix: str = "SEED", sep=" AS " ) -> str: - if self.alias_post_tablesample and expression.this.alias: + if self.ALIAS_POST_TABLESAMPLE and expression.this.alias: table = expression.this.copy() table.set("alias", None) this = self.sql(table) @@ -1372,7 +1359,15 @@ class Generator: def limit_sql(self, expression: exp.Limit) -> str: this = self.sql(expression, "this") - return f"{this}{self.seg('LIMIT')} {self.sql(expression, 'expression')}" + args = ", ".join( + sql + for sql in ( + 
self.sql(expression, "offset"), + self.sql(expression, "expression"), + ) + if sql + ) + return f"{this}{self.seg('LIMIT')} {args}" def offset_sql(self, expression: exp.Offset) -> str: this = self.sql(expression, "this") @@ -1418,10 +1413,10 @@ class Generator: def literal_sql(self, expression: exp.Literal) -> str: text = expression.this or "" if expression.is_string: - text = text.replace(self.quote_end, self._escaped_quote_end) + text = text.replace(self.QUOTE_END, self._escaped_quote_end) if self.pretty: text = text.replace("\n", self.SENTINEL_LINE_BREAK) - text = f"{self.quote_start}{text}{self.quote_end}" + text = f"{self.QUOTE_START}{text}{self.QUOTE_END}" return text def loaddata_sql(self, expression: exp.LoadData) -> str: @@ -1463,9 +1458,9 @@ class Generator: nulls_first = expression.args.get("nulls_first") nulls_last = not nulls_first - nulls_are_large = self.null_ordering == "nulls_are_large" - nulls_are_small = self.null_ordering == "nulls_are_small" - nulls_are_last = self.null_ordering == "nulls_are_last" + nulls_are_large = self.NULL_ORDERING == "nulls_are_large" + nulls_are_small = self.NULL_ORDERING == "nulls_are_small" + nulls_are_last = self.NULL_ORDERING == "nulls_are_last" sort_order = " DESC" if desc else "" nulls_sort_change = "" @@ -1521,7 +1516,7 @@ class Generator: return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}{alias}" def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str: - limit = expression.args.get("limit") + limit: t.Optional[exp.Fetch | exp.Limit] = expression.args.get("limit") if self.LIMIT_FETCH == "LIMIT" and isinstance(limit, exp.Fetch): limit = exp.Limit(expression=limit.args.get("count")) @@ -1540,12 +1535,19 @@ class Generator: self.sql(expression, "having"), *self.after_having_modifiers(expression), self.sql(expression, "order"), - self.sql(expression, "offset") if fetch else self.sql(limit), - self.sql(limit) if fetch else self.sql(expression, "offset"), + *self.offset_limit_modifiers(expression, fetch, limit), *self.after_limit_modifiers(expression), sep="", ) + def offset_limit_modifiers( + self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit] + ) -> t.List[str]: + return [ + self.sql(expression, "offset") if fetch else self.sql(limit), + self.sql(limit) if fetch else self.sql(expression, "offset"), + ] + def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]: return [ self.sql(expression, "qualify"), @@ -1634,7 +1636,7 @@ class Generator: def unnest_sql(self, expression: exp.Unnest) -> str: args = self.expressions(expression, flat=True) alias = expression.args.get("alias") - if alias and self.unnest_column_only: + if alias and self.UNNEST_COLUMN_ONLY: columns = alias.columns alias = self.sql(columns[0]) if columns else "" else: @@ -1697,7 +1699,7 @@ class Generator: return f"{this} BETWEEN {low} AND {high}" def bracket_sql(self, expression: exp.Bracket) -> str: - expressions = apply_index_offset(expression.this, expression.expressions, self.index_offset) + expressions = apply_index_offset(expression.this, expression.expressions, self.INDEX_OFFSET) expressions_sql = ", ".join(self.sql(e) for e in expressions) return f"{self.sql(expression, 'this')}[{expressions_sql}]" @@ -1729,7 +1731,7 @@ class Generator: statements.append("END") - if self.pretty and self.text_width(statements) > self._max_text_width: + if self.pretty and self.text_width(statements) > self.max_text_width: return self.indent("\n".join(statements), skip_first=True, skip_last=True) return " 
".join(statements) @@ -1759,10 +1761,11 @@ class Generator: else: return self.func("TRIM", expression.this, expression.expression) - def concat_sql(self, expression: exp.Concat) -> str: - if len(expression.expressions) == 1: - return self.sql(expression.expressions[0]) - return self.function_fallback_sql(expression) + def safeconcat_sql(self, expression: exp.SafeConcat) -> str: + expressions = expression.expressions + if self.STRICT_STRING_CONCAT: + expressions = (exp.cast(e, "text") for e in expressions) + return self.func("CONCAT", *expressions) def check_sql(self, expression: exp.Check) -> str: this = self.sql(expression, key="this") @@ -1785,9 +1788,7 @@ class Generator: return f"PRIMARY KEY ({expressions}){options}" def if_sql(self, expression: exp.If) -> str: - return self.case_sql( - exp.Case(ifs=[expression.copy()], default=expression.args.get("false")) - ) + return self.case_sql(exp.Case(ifs=[expression], default=expression.args.get("false"))) def matchagainst_sql(self, expression: exp.MatchAgainst) -> str: modifier = expression.args.get("modifier") @@ -1798,7 +1799,6 @@ class Generator: return f"{self.sql(expression, 'this')}: {self.sql(expression, 'expression')}" def jsonobject_sql(self, expression: exp.JSONObject) -> str: - expressions = self.expressions(expression) null_handling = expression.args.get("null_handling") null_handling = f" {null_handling}" if null_handling else "" unique_keys = expression.args.get("unique_keys") @@ -1811,7 +1811,11 @@ class Generator: format_json = " FORMAT JSON" if expression.args.get("format_json") else "" encoding = self.sql(expression, "encoding") encoding = f" ENCODING {encoding}" if encoding else "" - return f"JSON_OBJECT({expressions}{null_handling}{unique_keys}{return_type}{format_json}{encoding})" + return self.func( + "JSON_OBJECT", + *expression.expressions, + suffix=f"{null_handling}{unique_keys}{return_type}{format_json}{encoding})", + ) def openjsoncolumndef_sql(self, expression: exp.OpenJSONColumnDef) -> str: this = self.sql(expression, "this") @@ -1930,7 +1934,7 @@ class Generator: for i, e in enumerate(expression.flatten(unnest=False)) ) - sep = "\n" if self.text_width(sqls) > self._max_text_width else " " + sep = "\n" if self.text_width(sqls) > self.max_text_width else " " return f"{sep}{op} ".join(sqls) def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str: @@ -2093,6 +2097,11 @@ class Generator: def dpipe_sql(self, expression: exp.DPipe) -> str: return self.binary(expression, "||") + def safedpipe_sql(self, expression: exp.SafeDPipe) -> str: + if self.STRICT_STRING_CONCAT: + return self.func("CONCAT", *(exp.cast(e, "text") for e in expression.flatten())) + return self.dpipe_sql(expression) + def div_sql(self, expression: exp.Div) -> str: return self.binary(expression, "/") @@ -2127,7 +2136,7 @@ class Generator: return self.binary(expression, "ILIKE ANY") def is_sql(self, expression: exp.Is) -> str: - if not self.IS_BOOL and isinstance(expression.expression, exp.Boolean): + if not self.IS_BOOL_ALLOWED and isinstance(expression.expression, exp.Boolean): return self.sql( expression.this if expression.expression.this else exp.not_(expression.this) ) @@ -2197,12 +2206,18 @@ class Generator: return self.func(expression.sql_name(), *args) - def func(self, name: str, *args: t.Optional[exp.Expression | str]) -> str: - return f"{self.normalize_func(name)}({self.format_args(*args)})" + def func( + self, + name: str, + *args: t.Optional[exp.Expression | str], + prefix: str = "(", + suffix: str = ")", + ) -> str: + return 
f"{self.normalize_func(name)}{prefix}{self.format_args(*args)}{suffix}" def format_args(self, *args: t.Optional[str | exp.Expression]) -> str: arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None) - if self.pretty and self.text_width(arg_sqls) > self._max_text_width: + if self.pretty and self.text_width(arg_sqls) > self.max_text_width: return self.indent("\n" + f",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True) return ", ".join(arg_sqls) @@ -2210,7 +2225,9 @@ class Generator: return sum(len(arg) for arg in args) def format_time(self, expression: exp.Expression) -> t.Optional[str]: - return format_time(self.sql(expression, "format"), self.time_mapping, self.time_trie) + return format_time( + self.sql(expression, "format"), self.INVERSE_TIME_MAPPING, self.INVERSE_TIME_TRIE + ) def expressions( self, @@ -2242,7 +2259,7 @@ class Generator: comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else "" if self.pretty: - if self._leading_comma: + if self.leading_comma: result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}") else: result_sqls.append( diff --git a/sqlglot/helper.py b/sqlglot/helper.py index 4215fee..2f48ab5 100644 --- a/sqlglot/helper.py +++ b/sqlglot/helper.py @@ -208,7 +208,7 @@ def while_changing(expression: Expression, func: t.Callable[[Expression], E]) -> return expression -def tsort(dag: t.Dict[T, t.List[T]]) -> t.List[T]: +def tsort(dag: t.Dict[T, t.Set[T]]) -> t.List[T]: """ Sorts a given directed acyclic graph in topological order. @@ -220,22 +220,24 @@ def tsort(dag: t.Dict[T, t.List[T]]) -> t.List[T]: """ result = [] - def visit(node: T, visited: t.Set[T]) -> None: - if node in result: - return - if node in visited: - raise ValueError("Cycle error") + for node, deps in tuple(dag.items()): + for dep in deps: + if not dep in dag: + dag[dep] = set() + + while dag: + current = {node for node, deps in dag.items() if not deps} - visited.add(node) + if not current: + raise ValueError("Cycle error") - for dep in dag.get(node, []): - visit(dep, visited) + for node in current: + dag.pop(node) - visited.remove(node) - result.append(node) + for deps in dag.values(): + deps -= current - for node in dag: - visit(node, set()) + result.extend(sorted(current)) # type: ignore return result diff --git a/sqlglot/optimizer/annotate_types.py b/sqlglot/optimizer/annotate_types.py index 6238759..39e2c53 100644 --- a/sqlglot/optimizer/annotate_types.py +++ b/sqlglot/optimizer/annotate_types.py @@ -1,13 +1,25 @@ +from __future__ import annotations + +import typing as t + from sqlglot import exp +from sqlglot._typing import E from sqlglot.helper import ensure_list, subclasses from sqlglot.optimizer.scope import Scope, traverse_scope -from sqlglot.schema import ensure_schema +from sqlglot.schema import Schema, ensure_schema + +if t.TYPE_CHECKING: + B = t.TypeVar("B", bound=exp.Binary) -def annotate_types(expression, schema=None, annotators=None, coerces_to=None): +def annotate_types( + expression: E, + schema: t.Optional[t.Dict | Schema] = None, + annotators: t.Optional[t.Dict[t.Type[E], t.Callable[[TypeAnnotator, E], E]]] = None, + coerces_to: t.Optional[t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]]] = None, +) -> E: """ - Recursively infer & annotate types in an expression syntax tree against a schema. - Assumes that we've already executed the optimizer's qualify_columns step. + Infers the types of an expression, annotating its AST accordingly. 
Example: >>> import sqlglot @@ -18,12 +30,13 @@ def annotate_types(expression, schema=None, annotators=None, coerces_to=None): Args: - expression (sqlglot.Expression): Expression to annotate. - schema (dict|sqlglot.optimizer.Schema): Database schema. - annotators (dict): Maps expression type to corresponding annotation function. - coerces_to (dict): Maps expression type to set of types that it can be coerced into. + expression: Expression to annotate. + schema: Database schema. + annotators: Maps expression type to corresponding annotation function. + coerces_to: Maps expression type to set of types that it can be coerced into. + Returns: - sqlglot.Expression: expression annotated with types + The expression annotated with types. """ schema = ensure_schema(schema) @@ -31,276 +44,241 @@ def annotate_types(expression, schema=None, annotators=None, coerces_to=None): return TypeAnnotator(schema, annotators, coerces_to).annotate(expression) -class TypeAnnotator: - ANNOTATORS = { - **{ - expr_type: lambda self, expr: self._annotate_unary(expr) - for expr_type in subclasses(exp.__name__, exp.Unary) - }, - **{ - expr_type: lambda self, expr: self._annotate_binary(expr) - for expr_type in subclasses(exp.__name__, exp.Binary) - }, - exp.Cast: lambda self, expr: self._annotate_with_type(expr, expr.args["to"]), - exp.TryCast: lambda self, expr: self._annotate_with_type(expr, expr.args["to"]), - exp.DataType: lambda self, expr: self._annotate_with_type(expr, expr.copy()), - exp.Alias: lambda self, expr: self._annotate_unary(expr), - exp.Between: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN), - exp.In: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN), - exp.Literal: lambda self, expr: self._annotate_literal(expr), - exp.Boolean: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN), - exp.Null: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.NULL), - exp.Anonymous: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.UNKNOWN), - exp.ApproxDistinct: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.BIGINT - ), - exp.Avg: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.Min: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"), - exp.Max: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"), - exp.Sum: lambda self, expr: self._annotate_by_args( - expr, "this", "expressions", promote=True - ), - exp.Ceil: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.Count: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT), - exp.CurrentDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE), - exp.CurrentDatetime: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.DATETIME - ), - exp.CurrentTime: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.TIMESTAMP - ), - exp.CurrentTimestamp: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.TIMESTAMP - ), - exp.DateAdd: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE), - exp.DateSub: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE), - exp.DateDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.DatetimeAdd: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.DATETIME - ), - exp.DatetimeSub: lambda self, expr: 
self._annotate_with_type( - expr, exp.DataType.Type.DATETIME - ), - exp.DatetimeDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.Extract: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.TimestampAdd: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.TIMESTAMP - ), - exp.TimestampSub: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.TIMESTAMP - ), - exp.TimestampDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.TimeAdd: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TIMESTAMP), - exp.TimeSub: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TIMESTAMP), - exp.TimeDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.DateStrToDate: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.DATE - ), - exp.DateToDateStr: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.VARCHAR - ), - exp.DateToDi: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.Day: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT), - exp.DiToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE), - exp.Exp: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.Floor: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.Case: lambda self, expr: self._annotate_by_args(expr, "default", "ifs"), - exp.If: lambda self, expr: self._annotate_by_args(expr, "true", "false"), - exp.Coalesce: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"), - exp.IfNull: lambda self, expr: self._annotate_by_args(expr, "this", "expression"), - exp.Concat: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR), - exp.ConcatWs: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR), - exp.GroupConcat: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.VARCHAR - ), - exp.ArrayConcat: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.VARCHAR - ), - exp.ArraySize: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT), - exp.Map: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.MAP), - exp.VarMap: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.MAP), - exp.Initcap: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR), - exp.Interval: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INTERVAL), - exp.Least: lambda self, expr: self._annotate_by_args(expr, "expressions"), - exp.Length: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT), - exp.Levenshtein: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.Ln: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.Log: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.Log2: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.Log10: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.Lower: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR), - exp.Month: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT), - exp.Pow: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - 
exp.Quantile: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.ApproxQuantile: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.DOUBLE - ), - exp.RegexpLike: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.BOOLEAN - ), - exp.Round: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.SafeDivide: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.Substring: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR), - exp.StrPosition: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.StrToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE), - exp.StrToTime: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.TIMESTAMP - ), - exp.Sqrt: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.Stddev: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.StddevPop: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.StddevSamp: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.TimeToStr: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR), - exp.TimeToTimeStr: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.VARCHAR - ), - exp.TimeStrToDate: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.DATE - ), - exp.TimeStrToTime: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.TIMESTAMP - ), - exp.Trim: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR), - exp.TsOrDsToDateStr: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.VARCHAR - ), - exp.TsOrDsToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE), - exp.TsOrDiToDi: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT), - exp.UnixToStr: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR), - exp.UnixToTime: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.TIMESTAMP - ), - exp.UnixToTimeStr: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.VARCHAR - ), - exp.Upper: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR), - exp.Variance: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE), - exp.VariancePop: lambda self, expr: self._annotate_with_type( - expr, exp.DataType.Type.DOUBLE - ), - exp.Week: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT), - exp.Year: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT), - } +def _annotate_with_type_lambda(data_type: exp.DataType.Type) -> t.Callable[[TypeAnnotator, E], E]: + return lambda self, e: self._annotate_with_type(e, data_type) - # Reference: https://spark.apache.org/docs/3.2.0/sql-ref-ansi-compliance.html - COERCES_TO = { - # CHAR < NCHAR < VARCHAR < NVARCHAR < TEXT - exp.DataType.Type.TEXT: set(), - exp.DataType.Type.NVARCHAR: {exp.DataType.Type.TEXT}, - exp.DataType.Type.VARCHAR: {exp.DataType.Type.NVARCHAR, exp.DataType.Type.TEXT}, - exp.DataType.Type.NCHAR: { - exp.DataType.Type.VARCHAR, - exp.DataType.Type.NVARCHAR, + +class _TypeAnnotator(type): + def __new__(cls, clsname, bases, attrs): + klass = super().__new__(cls, clsname, bases, attrs) + + # Highest-to-lowest type precedence, as specified in 
Spark's docs (ANSI): + # https://spark.apache.org/docs/3.2.0/sql-ref-ansi-compliance.html + text_precedence = ( exp.DataType.Type.TEXT, - }, - exp.DataType.Type.CHAR: { - exp.DataType.Type.NCHAR, - exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR, - exp.DataType.Type.TEXT, - }, - # TINYINT < SMALLINT < INT < BIGINT < DECIMAL < FLOAT < DOUBLE - exp.DataType.Type.DOUBLE: set(), - exp.DataType.Type.FLOAT: {exp.DataType.Type.DOUBLE}, - exp.DataType.Type.DECIMAL: {exp.DataType.Type.FLOAT, exp.DataType.Type.DOUBLE}, - exp.DataType.Type.BIGINT: { - exp.DataType.Type.DECIMAL, - exp.DataType.Type.FLOAT, + exp.DataType.Type.VARCHAR, + exp.DataType.Type.NCHAR, + exp.DataType.Type.CHAR, + ) + numeric_precedence = ( exp.DataType.Type.DOUBLE, + exp.DataType.Type.FLOAT, + exp.DataType.Type.DECIMAL, + exp.DataType.Type.BIGINT, + exp.DataType.Type.INT, + exp.DataType.Type.SMALLINT, + exp.DataType.Type.TINYINT, + ) + timelike_precedence = ( + exp.DataType.Type.TIMESTAMPLTZ, + exp.DataType.Type.TIMESTAMPTZ, + exp.DataType.Type.TIMESTAMP, + exp.DataType.Type.DATETIME, + exp.DataType.Type.DATE, + ) + + for type_precedence in (text_precedence, numeric_precedence, timelike_precedence): + coerces_to = set() + for data_type in type_precedence: + klass.COERCES_TO[data_type] = coerces_to.copy() + coerces_to |= {data_type} + + return klass + + +class TypeAnnotator(metaclass=_TypeAnnotator): + TYPE_TO_EXPRESSIONS: t.Dict[exp.DataType.Type, t.Set[t.Type[exp.Expression]]] = { + exp.DataType.Type.BIGINT: { + exp.ApproxDistinct, + exp.ArraySize, + exp.Count, + exp.Length, + }, + exp.DataType.Type.BOOLEAN: { + exp.Between, + exp.Boolean, + exp.In, + exp.RegexpLike, + }, + exp.DataType.Type.DATE: { + exp.CurrentDate, + exp.Date, + exp.DateAdd, + exp.DateStrToDate, + exp.DateSub, + exp.DateTrunc, + exp.DiToDate, + exp.StrToDate, + exp.TimeStrToDate, + exp.TsOrDsToDate, + }, + exp.DataType.Type.DATETIME: { + exp.CurrentDatetime, + exp.DatetimeAdd, + exp.DatetimeSub, + }, + exp.DataType.Type.DOUBLE: { + exp.ApproxQuantile, + exp.Avg, + exp.Exp, + exp.Ln, + exp.Log, + exp.Log2, + exp.Log10, + exp.Pow, + exp.Quantile, + exp.Round, + exp.SafeDivide, + exp.Sqrt, + exp.Stddev, + exp.StddevPop, + exp.StddevSamp, + exp.Variance, + exp.VariancePop, }, exp.DataType.Type.INT: { - exp.DataType.Type.BIGINT, - exp.DataType.Type.DECIMAL, - exp.DataType.Type.FLOAT, - exp.DataType.Type.DOUBLE, + exp.Ceil, + exp.DateDiff, + exp.DatetimeDiff, + exp.Extract, + exp.TimestampDiff, + exp.TimeDiff, + exp.DateToDi, + exp.Floor, + exp.Levenshtein, + exp.StrPosition, + exp.TsOrDiToDi, }, - exp.DataType.Type.SMALLINT: { - exp.DataType.Type.INT, - exp.DataType.Type.BIGINT, - exp.DataType.Type.DECIMAL, - exp.DataType.Type.FLOAT, - exp.DataType.Type.DOUBLE, + exp.DataType.Type.TIMESTAMP: { + exp.CurrentTime, + exp.CurrentTimestamp, + exp.StrToTime, + exp.TimeAdd, + exp.TimeStrToTime, + exp.TimeSub, + exp.TimestampAdd, + exp.TimestampSub, + exp.UnixToTime, }, exp.DataType.Type.TINYINT: { - exp.DataType.Type.SMALLINT, - exp.DataType.Type.INT, - exp.DataType.Type.BIGINT, - exp.DataType.Type.DECIMAL, - exp.DataType.Type.FLOAT, - exp.DataType.Type.DOUBLE, + exp.Day, + exp.Month, + exp.Week, + exp.Year, }, - # DATE < DATETIME < TIMESTAMP < TIMESTAMPTZ < TIMESTAMPLTZ - exp.DataType.Type.TIMESTAMPLTZ: set(), - exp.DataType.Type.TIMESTAMPTZ: {exp.DataType.Type.TIMESTAMPLTZ}, - exp.DataType.Type.TIMESTAMP: { - exp.DataType.Type.TIMESTAMPTZ, - exp.DataType.Type.TIMESTAMPLTZ, + exp.DataType.Type.VARCHAR: { + exp.ArrayConcat, + exp.Concat, + exp.ConcatWs, + 
exp.DateToDateStr, + exp.GroupConcat, + exp.Initcap, + exp.Lower, + exp.SafeConcat, + exp.Substring, + exp.TimeToStr, + exp.TimeToTimeStr, + exp.Trim, + exp.TsOrDsToDateStr, + exp.UnixToStr, + exp.UnixToTimeStr, + exp.Upper, }, - exp.DataType.Type.DATETIME: { - exp.DataType.Type.TIMESTAMP, - exp.DataType.Type.TIMESTAMPTZ, - exp.DataType.Type.TIMESTAMPLTZ, + } + + ANNOTATORS = { + **{ + expr_type: lambda self, e: self._annotate_unary(e) + for expr_type in subclasses(exp.__name__, (exp.Unary, exp.Alias)) }, - exp.DataType.Type.DATE: { - exp.DataType.Type.DATETIME, - exp.DataType.Type.TIMESTAMP, - exp.DataType.Type.TIMESTAMPTZ, - exp.DataType.Type.TIMESTAMPLTZ, + **{ + expr_type: lambda self, e: self._annotate_binary(e) + for expr_type in subclasses(exp.__name__, exp.Binary) + }, + **{ + expr_type: _annotate_with_type_lambda(data_type) + for data_type, expressions in TYPE_TO_EXPRESSIONS.items() + for expr_type in expressions }, + exp.Anonymous: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.UNKNOWN), + exp.Cast: lambda self, e: self._annotate_with_type(e, e.args["to"]), + exp.Case: lambda self, e: self._annotate_by_args(e, "default", "ifs"), + exp.Coalesce: lambda self, e: self._annotate_by_args(e, "this", "expressions"), + exp.DataType: lambda self, e: self._annotate_with_type(e, e.copy()), + exp.If: lambda self, e: self._annotate_by_args(e, "true", "false"), + exp.Interval: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.INTERVAL), + exp.Least: lambda self, e: self._annotate_by_args(e, "expressions"), + exp.Literal: lambda self, e: self._annotate_literal(e), + exp.Map: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.MAP), + exp.Max: lambda self, e: self._annotate_by_args(e, "this", "expressions"), + exp.Min: lambda self, e: self._annotate_by_args(e, "this", "expressions"), + exp.Null: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.NULL), + exp.Sum: lambda self, e: self._annotate_by_args(e, "this", "expressions", promote=True), + exp.TryCast: lambda self, e: self._annotate_with_type(e, e.args["to"]), + exp.VarMap: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.MAP), } - TRAVERSABLES = (exp.Select, exp.Union, exp.UDTF, exp.Subquery) + # Specifies what types a given type can be coerced into (autofilled) + COERCES_TO: t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]] = {} - def __init__(self, schema=None, annotators=None, coerces_to=None): + def __init__( + self, + schema: Schema, + annotators: t.Optional[t.Dict[t.Type[E], t.Callable[[TypeAnnotator, E], E]]] = None, + coerces_to: t.Optional[t.Dict[exp.DataType.Type, t.Set[exp.DataType.Type]]] = None, + ) -> None: self.schema = schema self.annotators = annotators or self.ANNOTATORS self.coerces_to = coerces_to or self.COERCES_TO - def annotate(self, expression): - if isinstance(expression, self.TRAVERSABLES): - for scope in traverse_scope(expression): - selects = {} - for name, source in scope.sources.items(): - if not isinstance(source, Scope): - continue - if isinstance(source.expression, exp.UDTF): - values = [] - - if isinstance(source.expression, exp.Lateral): - if isinstance(source.expression.this, exp.Explode): - values = [source.expression.this.this] - else: - values = source.expression.expressions[0].expressions - - if not values: - continue - - selects[name] = { - alias: column - for alias, column in zip( - source.expression.alias_column_names, - values, - ) - } + def annotate(self, expression: E) -> E: + for scope in traverse_scope(expression): + selects = {} + 
for name, source in scope.sources.items(): + if not isinstance(source, Scope): + continue + if isinstance(source.expression, exp.UDTF): + values = [] + + if isinstance(source.expression, exp.Lateral): + if isinstance(source.expression.this, exp.Explode): + values = [source.expression.this.this] else: - selects[name] = { - select.alias_or_name: select for select in source.expression.selects - } - # First annotate the current scope's column references - for col in scope.columns: - if not col.table: + values = source.expression.expressions[0].expressions + + if not values: continue - source = scope.sources.get(col.table) - if isinstance(source, exp.Table): - col.type = self.schema.get_column_type(source, col) - elif source and col.table in selects and col.name in selects[col.table]: - col.type = selects[col.table][col.name].type - # Then (possibly) annotate the remaining expressions in the scope - self._maybe_annotate(scope.expression) + selects[name] = { + alias: column + for alias, column in zip( + source.expression.alias_column_names, + values, + ) + } + else: + selects[name] = { + select.alias_or_name: select for select in source.expression.selects + } + + # First annotate the current scope's column references + for col in scope.columns: + if not col.table: + continue + + source = scope.sources.get(col.table) + if isinstance(source, exp.Table): + col.type = self.schema.get_column_type(source, col) + elif source and col.table in selects and col.name in selects[col.table]: + col.type = selects[col.table][col.name].type + + # Then (possibly) annotate the remaining expressions in the scope + self._maybe_annotate(scope.expression) + return self._maybe_annotate(expression) # This takes care of non-traversable expressions - def _maybe_annotate(self, expression): + def _maybe_annotate(self, expression: E) -> E: if expression.type: return expression # We've already inferred the expression's type @@ -312,13 +290,15 @@ class TypeAnnotator: else self._annotate_with_type(expression, exp.DataType.Type.UNKNOWN) ) - def _annotate_args(self, expression): + def _annotate_args(self, expression: E) -> E: for _, value in expression.iter_expressions(): self._maybe_annotate(value) return expression - def _maybe_coerce(self, type1, type2): + def _maybe_coerce( + self, type1: exp.DataType | exp.DataType.Type, type2: exp.DataType | exp.DataType.Type + ) -> exp.DataType.Type: # We propagate the NULL / UNKNOWN types upwards if found if isinstance(type1, exp.DataType): type1 = type1.this @@ -330,9 +310,14 @@ class TypeAnnotator: if exp.DataType.Type.UNKNOWN in (type1, type2): return exp.DataType.Type.UNKNOWN - return type2 if type2 in self.coerces_to.get(type1, {}) else type1 + return type2 if type2 in self.coerces_to.get(type1, {}) else type1 # type: ignore - def _annotate_binary(self, expression): + # Note: the following "no_type_check" decorators were added because mypy was yelling due + # to assigning Type values to expression.type (since its getter returns Optional[DataType]). 
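# An illustrative check (a sketch, not part of the patch) of what the _TypeAnnotator
# metaclass above auto-fills into COERCES_TO from the three precedence chains: a type
# may coerce to anything of strictly higher precedence in its own chain, so INT widens
# to BIGINT/DECIMAL/FLOAT/DOUBLE while DOUBLE, at the top of the numeric chain, coerces
# to nothing. _maybe_coerce then returns type2 when type1 can coerce to it, i.e. the
# wider of the two, falling back to type1 otherwise.
from sqlglot import exp
from sqlglot.optimizer.annotate_types import TypeAnnotator

T = exp.DataType.Type
assert TypeAnnotator.COERCES_TO[T.DOUBLE] == set()            # top of the numeric chain
assert T.DOUBLE in TypeAnnotator.COERCES_TO[T.INT]            # INT can widen to DOUBLE
assert {T.SMALLINT, T.INT, T.BIGINT} <= TypeAnnotator.COERCES_TO[T.TINYINT]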
+ # This is a known mypy issue: https://github.com/python/mypy/issues/3004 + + @t.no_type_check + def _annotate_binary(self, expression: B) -> B: self._annotate_args(expression) left_type = expression.left.type.this @@ -354,7 +339,8 @@ class TypeAnnotator: return expression - def _annotate_unary(self, expression): + @t.no_type_check + def _annotate_unary(self, expression: E) -> E: self._annotate_args(expression) if isinstance(expression, exp.Condition) and not isinstance(expression, exp.Paren): @@ -364,7 +350,8 @@ class TypeAnnotator: return expression - def _annotate_literal(self, expression): + @t.no_type_check + def _annotate_literal(self, expression: exp.Literal) -> exp.Literal: if expression.is_string: expression.type = exp.DataType.Type.VARCHAR elif expression.is_int: @@ -374,13 +361,16 @@ class TypeAnnotator: return expression - def _annotate_with_type(self, expression, target_type): + @t.no_type_check + def _annotate_with_type(self, expression: E, target_type: exp.DataType.Type) -> E: expression.type = target_type return self._annotate_args(expression) - def _annotate_by_args(self, expression, *args, promote=False): + @t.no_type_check + def _annotate_by_args(self, expression: E, *args: str, promote: bool = False) -> E: self._annotate_args(expression) - expressions = [] + + expressions: t.List[exp.Expression] = [] for arg in args: arg_expr = expression.args.get(arg) expressions.extend(expr for expr in ensure_list(arg_expr) if expr) diff --git a/sqlglot/optimizer/canonicalize.py b/sqlglot/optimizer/canonicalize.py index da2fce8..015b06a 100644 --- a/sqlglot/optimizer/canonicalize.py +++ b/sqlglot/optimizer/canonicalize.py @@ -26,7 +26,7 @@ def canonicalize(expression: exp.Expression) -> exp.Expression: def add_text_to_concat(node: exp.Expression) -> exp.Expression: if isinstance(node, exp.Add) and node.type and node.type.this in exp.DataType.TEXT_TYPES: - node = exp.Concat(this=node.this, expression=node.expression) + node = exp.Concat(expressions=[node.left, node.right]) return node diff --git a/sqlglot/optimizer/eliminate_joins.py b/sqlglot/optimizer/eliminate_joins.py index 27de9c7..cd8ba3b 100644 --- a/sqlglot/optimizer/eliminate_joins.py +++ b/sqlglot/optimizer/eliminate_joins.py @@ -32,7 +32,7 @@ def eliminate_joins(expression): # Reverse the joins so we can remove chains of unused joins for join in reversed(joins): - alias = join.this.alias_or_name + alias = join.alias_or_name if _should_eliminate_join(scope, join, alias): join.pop() scope.remove_source(alias) @@ -126,7 +126,7 @@ def join_condition(join): tuple[list[str], list[str], exp.Expression]: Tuple of (source key, join key, remaining predicate) """ - name = join.this.alias_or_name + name = join.alias_or_name on = (join.args.get("on") or exp.true()).copy() source_key = [] join_key = [] diff --git a/sqlglot/optimizer/isolate_table_selects.py b/sqlglot/optimizer/isolate_table_selects.py index 5dfa4aa..79e3ed5 100644 --- a/sqlglot/optimizer/isolate_table_selects.py +++ b/sqlglot/optimizer/isolate_table_selects.py @@ -21,7 +21,7 @@ def isolate_table_selects(expression, schema=None): source.replace( exp.select("*") .from_( - alias(source, source.name or source.alias, table=True), + alias(source, source.alias_or_name, table=True), copy=False, ) .subquery(source.alias, copy=False) diff --git a/sqlglot/optimizer/merge_subqueries.py b/sqlglot/optimizer/merge_subqueries.py index f9c9664..fefe96e 100644 --- a/sqlglot/optimizer/merge_subqueries.py +++ b/sqlglot/optimizer/merge_subqueries.py @@ -145,7 +145,7 @@ def 
_mergeable(outer_scope, inner_scope, leave_tables_isolated, from_or_join): if not isinstance(from_or_join, exp.Join): return False - alias = from_or_join.this.alias_or_name + alias = from_or_join.alias_or_name on = from_or_join.args.get("on") if not on: @@ -253,10 +253,6 @@ def _merge_joins(outer_scope, inner_scope, from_or_join): """ new_joins = [] - comma_joins = inner_scope.expression.args.get("from").expressions[1:] - for subquery in comma_joins: - new_joins.append(exp.Join(this=subquery, kind="CROSS")) - outer_scope.add_source(subquery.alias_or_name, inner_scope.sources[subquery.alias_or_name]) joins = inner_scope.expression.args.get("joins") or [] for join in joins: @@ -328,13 +324,12 @@ def _merge_where(outer_scope, inner_scope, from_or_join): if source == from_or_join.alias_or_name: break - if set(exp.column_table_names(where.this)) <= sources: + if exp.column_table_names(where.this) <= sources: from_or_join.on(where.this, copy=False) from_or_join.set("on", from_or_join.args.get("on")) return expression.where(where.this, copy=False) - expression.set("where", expression.args.get("where")) def _merge_order(outer_scope, inner_scope): diff --git a/sqlglot/optimizer/optimize_joins.py b/sqlglot/optimizer/optimize_joins.py index 4e0c3a1..d51276f 100644 --- a/sqlglot/optimizer/optimize_joins.py +++ b/sqlglot/optimizer/optimize_joins.py @@ -1,3 +1,7 @@ +from __future__ import annotations + +import typing as t + from sqlglot import exp from sqlglot.helper import tsort @@ -13,25 +17,28 @@ def optimize_joins(expression): >>> optimize_joins(parse_one("SELECT * FROM x CROSS JOIN y JOIN z ON x.a = z.a AND y.a = z.a")).sql() 'SELECT * FROM x JOIN z ON x.a = z.a AND TRUE JOIN y ON y.a = z.a' """ + for select in expression.find_all(exp.Select): references = {} cross_joins = [] for join in select.args.get("joins", []): - name = join.this.alias_or_name - tables = other_table_names(join, name) + tables = other_table_names(join) if tables: for table in tables: references[table] = references.get(table, []) + [join] else: - cross_joins.append((name, join)) + cross_joins.append((join.alias_or_name, join)) for name, join in cross_joins: for dep in references.get(name, []): on = dep.args["on"] if isinstance(on, exp.Connector): + if len(other_table_names(dep)) < 2: + continue + for predicate in on.flatten(): if name in exp.column_table_names(predicate): predicate.replace(exp.true()) @@ -47,17 +54,12 @@ def reorder_joins(expression): Reorder joins by topological sort order based on predicate references. 
""" for from_ in expression.find_all(exp.From): - head = from_.this parent = from_.parent - joins = {join.this.alias_or_name: join for join in parent.args.get("joins", [])} - dag = {head.alias_or_name: []} - - for name, join in joins.items(): - dag[name] = other_table_names(join, name) - + joins = {join.alias_or_name: join for join in parent.args.get("joins", [])} + dag = {name: other_table_names(join) for name, join in joins.items()} parent.set( "joins", - [joins[name] for name in tsort(dag) if name != head.alias_or_name], + [joins[name] for name in tsort(dag) if name != from_.alias_or_name], ) return expression @@ -75,9 +77,6 @@ def normalize(expression): return expression -def other_table_names(join, exclude): - return [ - name - for name in (exp.column_table_names(join.args.get("on") or exp.true())) - if name != exclude - ] +def other_table_names(join: exp.Join) -> t.Set[str]: + on = join.args.get("on") + return exp.column_table_names(on, join.alias_or_name) if on else set() diff --git a/sqlglot/optimizer/optimizer.py b/sqlglot/optimizer/optimizer.py index dbe33a2..abac63b 100644 --- a/sqlglot/optimizer/optimizer.py +++ b/sqlglot/optimizer/optimizer.py @@ -78,7 +78,7 @@ def optimize( "schema": schema, "dialect": dialect, "isolate_tables": True, # needed for other optimizations to perform well - "quote_identifiers": False, # this happens in canonicalize + "quote_identifiers": False, **kwargs, } diff --git a/sqlglot/optimizer/pushdown_predicates.py b/sqlglot/optimizer/pushdown_predicates.py index b89a82b..fb1662d 100644 --- a/sqlglot/optimizer/pushdown_predicates.py +++ b/sqlglot/optimizer/pushdown_predicates.py @@ -41,7 +41,7 @@ def pushdown_predicates(expression): # joins should only pushdown into itself, not to other joins # so we limit the selected sources to only itself for join in select.args.get("joins") or []: - name = join.this.alias_or_name + name = join.alias_or_name pushdown(join.args.get("on"), {name: scope.selected_sources[name]}, scope_ref_count) return expression @@ -93,10 +93,10 @@ def pushdown_dnf(predicates, scope, scope_ref_count): pushdown_tables = set() for a in predicates: - a_tables = set(exp.column_table_names(a)) + a_tables = exp.column_table_names(a) for b in predicates: - a_tables &= set(exp.column_table_names(b)) + a_tables &= exp.column_table_names(b) pushdown_tables.update(a_tables) @@ -147,7 +147,7 @@ def nodes_for_predicate(predicate, sources, scope_ref_count): tables = exp.column_table_names(predicate) where_condition = isinstance(predicate.find_ancestor(exp.Join, exp.Where), exp.Where) - for table in tables: + for table in sorted(tables): node, source = sources.get(table) or (None, None) # if the predicate is in a where statement we can try to push it down diff --git a/sqlglot/optimizer/qualify_columns.py b/sqlglot/optimizer/qualify_columns.py index 4a31171..aba9a7e 100644 --- a/sqlglot/optimizer/qualify_columns.py +++ b/sqlglot/optimizer/qualify_columns.py @@ -14,7 +14,7 @@ from sqlglot.schema import Schema, ensure_schema def qualify_columns( expression: exp.Expression, - schema: dict | Schema, + schema: t.Dict | Schema, expand_alias_refs: bool = True, infer_schema: t.Optional[bool] = None, ) -> exp.Expression: @@ -93,7 +93,7 @@ def _pop_table_column_aliases(derived_tables): def _expand_using(scope, resolver): joins = list(scope.find_all(exp.Join)) - names = {join.this.alias for join in joins} + names = {join.alias_or_name for join in joins} ordered = [key for key in scope.selected_sources if key not in names] # Mapping of automatically joined column 
names to an ordered set of source names (dict). @@ -105,7 +105,7 @@ def _expand_using(scope, resolver): if not using: continue - join_table = join.this.alias_or_name + join_table = join.alias_or_name columns = {} diff --git a/sqlglot/optimizer/qualify_tables.py b/sqlglot/optimizer/qualify_tables.py index fcc5f26..9c931d6 100644 --- a/sqlglot/optimizer/qualify_tables.py +++ b/sqlglot/optimizer/qualify_tables.py @@ -91,11 +91,13 @@ def qualify_tables( ) elif isinstance(source, Scope) and source.is_udtf: udtf = source.expression - table_alias = udtf.args.get("alias") or exp.TableAlias(this=next_alias_name()) + table_alias = udtf.args.get("alias") or exp.TableAlias( + this=exp.to_identifier(next_alias_name()) + ) udtf.set("alias", table_alias) if not table_alias.name: - table_alias.set("this", next_alias_name()) + table_alias.set("this", exp.to_identifier(next_alias_name())) if isinstance(udtf, exp.Values) and not table_alias.columns: for i, e in enumerate(udtf.expressions[0].expressions): table_alias.append("columns", exp.to_identifier(f"_col_{i}")) diff --git a/sqlglot/optimizer/scope.py b/sqlglot/optimizer/scope.py index 9ffb4d6..aa56b83 100644 --- a/sqlglot/optimizer/scope.py +++ b/sqlglot/optimizer/scope.py @@ -620,7 +620,7 @@ def _traverse_tables(scope): table_name = expression.name source_name = expression.alias_or_name - if table_name in scope.sources: + if table_name in scope.sources and not expression.db: # This is a reference to a parent source (e.g. a CTE), not an actual table, unless # it is pivoted, because then we get back a new table and hence a new source. pivots = expression.args.get("pivots") diff --git a/sqlglot/parser.py b/sqlglot/parser.py index 96bd6e3..d6888c7 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -6,7 +6,8 @@ from collections import defaultdict from sqlglot import exp from sqlglot.errors import ErrorLevel, ParseError, concat_messages, merge_errors -from sqlglot.helper import apply_index_offset, ensure_collection, ensure_list, seq_get +from sqlglot.helper import apply_index_offset, ensure_list, seq_get +from sqlglot.time import format_time from sqlglot.tokens import Token, Tokenizer, TokenType from sqlglot.trie import in_trie, new_trie @@ -25,13 +26,14 @@ def parse_var_map(args: t.List) -> exp.StarMap | exp.VarMap: for i in range(0, len(args), 2): keys.append(args[i]) values.append(args[i + 1]) + return exp.VarMap( keys=exp.Array(expressions=keys), values=exp.Array(expressions=values), ) -def parse_like(args: t.List) -> exp.Expression: +def parse_like(args: t.List) -> exp.Escape | exp.Like: like = exp.Like(this=seq_get(args, 1), expression=seq_get(args, 0)) return exp.Escape(this=like, expression=seq_get(args, 2)) if len(args) > 2 else like @@ -47,33 +49,26 @@ def binary_range_parser( class _Parser(type): def __new__(cls, clsname, bases, attrs): klass = super().__new__(cls, clsname, bases, attrs) - klass._show_trie = new_trie(key.split(" ") for key in klass.SHOW_PARSERS) - klass._set_trie = new_trie(key.split(" ") for key in klass.SET_PARSERS) + + klass.SHOW_TRIE = new_trie(key.split(" ") for key in klass.SHOW_PARSERS) + klass.SET_TRIE = new_trie(key.split(" ") for key in klass.SET_PARSERS) return klass class Parser(metaclass=_Parser): """ - Parser consumes a list of tokens produced by the `sqlglot.tokens.Tokenizer` and produces - a parsed syntax tree. + Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree. Args: - error_level: the desired error level. + error_level: The desired error level. 
Default: ErrorLevel.IMMEDIATE - error_message_context: determines the amount of context to capture from a + error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). - Default: 50. - index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. - Default: 0 - alias_post_tablesample: If the table alias comes after tablesample. - Default: False + Default: 100 max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3 - null_ordering: Indicates the default null ordering method to use if not explicitly set. - Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". - Default: "nulls_are_small" """ FUNCTIONS: t.Dict[str, t.Callable] = { @@ -83,7 +78,6 @@ class Parser(metaclass=_Parser): to=exp.DataType(this=exp.DataType.Type.TEXT), ), "GLOB": lambda args: exp.Glob(this=seq_get(args, 1), expression=seq_get(args, 0)), - "IFNULL": exp.Coalesce.from_arg_list, "LIKE": parse_like, "TIME_TO_TIME_STR": lambda args: exp.Cast( this=seq_get(args, 0), @@ -108,8 +102,6 @@ class Parser(metaclass=_Parser): TokenType.CURRENT_USER: exp.CurrentUser, } - JOIN_HINTS: t.Set[str] = set() - NESTED_TYPE_TOKENS = { TokenType.ARRAY, TokenType.MAP, @@ -117,6 +109,10 @@ class Parser(metaclass=_Parser): TokenType.STRUCT, } + ENUM_TYPE_TOKENS = { + TokenType.ENUM, + } + TYPE_TOKENS = { TokenType.BIT, TokenType.BOOLEAN, @@ -188,6 +184,7 @@ class Parser(metaclass=_Parser): TokenType.VARIANT, TokenType.OBJECT, TokenType.INET, + TokenType.ENUM, *NESTED_TYPE_TOKENS, } @@ -198,7 +195,10 @@ class Parser(metaclass=_Parser): TokenType.SOME: exp.Any, } - RESERVED_KEYWORDS = {*Tokenizer.SINGLE_TOKENS.values(), TokenType.SELECT} + RESERVED_KEYWORDS = { + *Tokenizer.SINGLE_TOKENS.values(), + TokenType.SELECT, + } DB_CREATABLES = { TokenType.DATABASE, @@ -216,6 +216,7 @@ class Parser(metaclass=_Parser): *DB_CREATABLES, } + # Tokens that can represent identifiers ID_VAR_TOKENS = { TokenType.VAR, TokenType.ANTI, @@ -224,6 +225,7 @@ class Parser(metaclass=_Parser): TokenType.AUTO_INCREMENT, TokenType.BEGIN, TokenType.CACHE, + TokenType.CASE, TokenType.COLLATE, TokenType.COMMAND, TokenType.COMMENT, @@ -274,6 +276,7 @@ class Parser(metaclass=_Parser): TokenType.TRUE, TokenType.UNIQUE, TokenType.UNPIVOT, + TokenType.UPDATE, TokenType.VOLATILE, TokenType.WINDOW, *CREATABLES, @@ -409,6 +412,8 @@ class Parser(metaclass=_Parser): TokenType.ANTI, } + JOIN_HINTS: t.Set[str] = set() + LAMBDAS = { TokenType.ARROW: lambda self, expressions: self.expression( exp.Lambda, @@ -420,7 +425,7 @@ class Parser(metaclass=_Parser): ), TokenType.FARROW: lambda self, expressions: self.expression( exp.Kwarg, - this=exp.Var(this=expressions[0].name), + this=exp.var(expressions[0].name), expression=self._parse_conjunction(), ), } @@ -515,7 +520,7 @@ class Parser(metaclass=_Parser): TokenType.USE: lambda self: self.expression( exp.Use, kind=self._match_texts(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA")) - and exp.Var(this=self._prev.text), + and exp.var(self._prev.text), this=self._parse_table(schema=False), ), } @@ -634,6 +639,7 @@ class Parser(metaclass=_Parser): "TBLPROPERTIES": lambda self: self._parse_wrapped_csv(self._parse_property), "TEMP": lambda self: self.expression(exp.TemporaryProperty), "TEMPORARY": lambda self: self.expression(exp.TemporaryProperty), + "TO": lambda self: self._parse_to_table(), "TRANSIENT": lambda self: 
self.expression(exp.TransientProperty), "TTL": lambda self: self._parse_ttl(), "USING": lambda self: self._parse_property_assignment(exp.FileFormatProperty), @@ -710,6 +716,7 @@ class Parser(metaclass=_Parser): FUNCTION_PARSERS: t.Dict[str, t.Callable] = { "CAST": lambda self: self._parse_cast(self.STRICT_CAST), + "CONCAT": lambda self: self._parse_concat(), "CONVERT": lambda self: self._parse_convert(self.STRICT_CAST), "DECODE": lambda self: self._parse_decode(), "EXTRACT": lambda self: self._parse_extract(), @@ -755,8 +762,11 @@ class Parser(metaclass=_Parser): MODIFIABLES = (exp.Subquery, exp.Subqueryable, exp.Table) - TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"} + DDL_SELECT_TOKENS = {TokenType.SELECT, TokenType.WITH, TokenType.L_PAREN} + PRE_VOLATILE_TOKENS = {TokenType.CREATE, TokenType.REPLACE, TokenType.UNIQUE} + + TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"} TRANSACTION_CHARACTERISTICS = { "ISOLATION LEVEL REPEATABLE READ", "ISOLATION LEVEL READ COMMITTED", @@ -778,6 +788,8 @@ class Parser(metaclass=_Parser): STRICT_CAST = True + CONCAT_NULL_OUTPUTS_STRING = False # A NULL arg in CONCAT yields NULL by default + CONVERT_TYPE_FIRST = False PREFIXED_PIVOT_COLUMNS = False @@ -789,40 +801,39 @@ class Parser(metaclass=_Parser): __slots__ = ( "error_level", "error_message_context", + "max_errors", "sql", "errors", - "index_offset", - "unnest_column_only", - "alias_post_tablesample", - "max_errors", - "null_ordering", "_tokens", "_index", "_curr", "_next", "_prev", "_prev_comments", - "_show_trie", - "_set_trie", ) + # Autofilled + INDEX_OFFSET: int = 0 + UNNEST_COLUMN_ONLY: bool = False + ALIAS_POST_TABLESAMPLE: bool = False + STRICT_STRING_CONCAT = False + NULL_ORDERING: str = "nulls_are_small" + SHOW_TRIE: t.Dict = {} + SET_TRIE: t.Dict = {} + FORMAT_MAPPING: t.Dict[str, str] = {} + FORMAT_TRIE: t.Dict = {} + TIME_MAPPING: t.Dict[str, str] = {} + TIME_TRIE: t.Dict = {} + def __init__( self, error_level: t.Optional[ErrorLevel] = None, error_message_context: int = 100, - index_offset: int = 0, - unnest_column_only: bool = False, - alias_post_tablesample: bool = False, max_errors: int = 3, - null_ordering: t.Optional[str] = None, ): self.error_level = error_level or ErrorLevel.IMMEDIATE self.error_message_context = error_message_context - self.index_offset = index_offset - self.unnest_column_only = unnest_column_only - self.alias_post_tablesample = alias_post_tablesample self.max_errors = max_errors - self.null_ordering = null_ordering self.reset() def reset(self): @@ -843,11 +854,11 @@ class Parser(metaclass=_Parser): per parsed SQL statement. Args: - raw_tokens: the list of tokens. - sql: the original SQL string, used to produce helpful debug messages. + raw_tokens: The list of tokens. + sql: The original SQL string, used to produce helpful debug messages. Returns: - The list of syntax trees. + The list of the produced syntax trees. """ return self._parse( parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql @@ -865,23 +876,25 @@ class Parser(metaclass=_Parser): of them, stopping at the first for which the parsing succeeds. Args: - expression_types: the expression type(s) to try and parse the token list into. - raw_tokens: the list of tokens. - sql: the original SQL string, used to produce helpful debug messages. + expression_types: The expression type(s) to try and parse the token list into. + raw_tokens: The list of tokens. + sql: The original SQL string, used to produce helpful debug messages. Returns: The target Expression. 
""" errors = [] - for expression_type in ensure_collection(expression_types): + for expression_type in ensure_list(expression_types): parser = self.EXPRESSION_PARSERS.get(expression_type) if not parser: raise TypeError(f"No parser registered for {expression_type}") + try: return self._parse(parser, raw_tokens, sql) except ParseError as e: e.errors[0]["into_expression"] = expression_type errors.append(e) + raise ParseError( f"Failed to parse '{sql or raw_tokens}' into {expression_types}", errors=merge_errors(errors), @@ -895,6 +908,7 @@ class Parser(metaclass=_Parser): ) -> t.List[t.Optional[exp.Expression]]: self.reset() self.sql = sql or "" + total = len(raw_tokens) chunks: t.List[t.List[Token]] = [[]] @@ -922,9 +936,7 @@ class Parser(metaclass=_Parser): return expressions def check_errors(self) -> None: - """ - Logs or raises any found errors, depending on the chosen error level setting. - """ + """Logs or raises any found errors, depending on the chosen error level setting.""" if self.error_level == ErrorLevel.WARN: for error in self.errors: logger.error(str(error)) @@ -969,39 +981,38 @@ class Parser(metaclass=_Parser): Creates a new, validated Expression. Args: - exp_class: the expression class to instantiate. - comments: an optional list of comments to attach to the expression. - kwargs: the arguments to set for the expression along with their respective values. + exp_class: The expression class to instantiate. + comments: An optional list of comments to attach to the expression. + kwargs: The arguments to set for the expression along with their respective values. Returns: The target expression. """ instance = exp_class(**kwargs) instance.add_comments(comments) if comments else self._add_comments(instance) - self.validate_expression(instance) - return instance + return self.validate_expression(instance) def _add_comments(self, expression: t.Optional[exp.Expression]) -> None: if expression and self._prev_comments: expression.add_comments(self._prev_comments) self._prev_comments = None - def validate_expression( - self, expression: exp.Expression, args: t.Optional[t.List] = None - ) -> None: + def validate_expression(self, expression: E, args: t.Optional[t.List] = None) -> E: """ - Validates an already instantiated expression, making sure that all its mandatory arguments - are set. + Validates an Expression, making sure that all its mandatory arguments are set. Args: - expression: the expression to validate. - args: an optional list of items that was used to instantiate the expression, if it's a Func. + expression: The expression to validate. + args: An optional list of items that was used to instantiate the expression, if it's a Func. + + Returns: + The validated expression. 
""" - if self.error_level == ErrorLevel.IGNORE: - return + if self.error_level != ErrorLevel.IGNORE: + for error_message in expression.error_messages(args): + self.raise_error(error_message) - for error_message in expression.error_messages(args): - self.raise_error(error_message) + return expression def _find_sql(self, start: Token, end: Token) -> str: return self.sql[start.start : end.end + 1] @@ -1010,6 +1021,7 @@ class Parser(metaclass=_Parser): self._index += times self._curr = seq_get(self._tokens, self._index) self._next = seq_get(self._tokens, self._index + 1) + if self._index > 0: self._prev = self._tokens[self._index - 1] self._prev_comments = self._prev.comments @@ -1031,7 +1043,6 @@ class Parser(metaclass=_Parser): self._match(TokenType.ON) kind = self._match_set(self.CREATABLES) and self._prev - if not kind: return self._parse_as_command(start) @@ -1050,6 +1061,12 @@ class Parser(metaclass=_Parser): exp.Comment, this=this, kind=kind.text, expression=self._parse_string(), exists=exists ) + def _parse_to_table( + self, + ) -> exp.ToTableProperty: + table = self._parse_table_parts(schema=True) + return self.expression(exp.ToTableProperty, this=table) + # https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#mergetree-table-ttl def _parse_ttl(self) -> exp.Expression: def _parse_ttl_action() -> t.Optional[exp.Expression]: @@ -1102,10 +1119,11 @@ class Parser(metaclass=_Parser): expression = self._parse_set_operations(expression) if expression else self._parse_select() return self._parse_query_modifiers(expression) - def _parse_drop(self) -> t.Optional[exp.Drop | exp.Command]: + def _parse_drop(self) -> exp.Drop | exp.Command: start = self._prev temporary = self._match(TokenType.TEMPORARY) materialized = self._match_text_seq("MATERIALIZED") + kind = self._match_set(self.CREATABLES) and self._prev.text if not kind: return self._parse_as_command(start) @@ -1129,21 +1147,23 @@ class Parser(metaclass=_Parser): and self._match(TokenType.EXISTS) ) - def _parse_create(self) -> t.Optional[exp.Expression]: + def _parse_create(self) -> exp.Create | exp.Command: + # Note: this can't be None because we've matched a statement parser start = self._prev - replace = self._prev.text.upper() == "REPLACE" or self._match_pair( + replace = start.text.upper() == "REPLACE" or self._match_pair( TokenType.OR, TokenType.REPLACE ) unique = self._match(TokenType.UNIQUE) if self._match_pair(TokenType.TABLE, TokenType.FUNCTION, advance=False): - self._match(TokenType.TABLE) + self._advance() properties = None create_token = self._match_set(self.CREATABLES) and self._prev if not create_token: - properties = self._parse_properties() # exp.Properties.Location.POST_CREATE + # exp.Properties.Location.POST_CREATE + properties = self._parse_properties() create_token = self._match_set(self.CREATABLES) and self._prev if not properties or not create_token: @@ -1157,7 +1177,7 @@ class Parser(metaclass=_Parser): begin = None clone = None - def extend_props(temp_props: t.Optional[exp.Expression]) -> None: + def extend_props(temp_props: t.Optional[exp.Properties]) -> None: nonlocal properties if properties and temp_props: properties.expressions.extend(temp_props.expressions) @@ -1166,6 +1186,8 @@ class Parser(metaclass=_Parser): if create_token.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE): this = self._parse_user_defined_function(kind=create_token.token_type) + + # exp.Properties.Location.POST_SCHEMA ("schema" here is the UDF's type signature) extend_props(self._parse_properties()) 
self._match(TokenType.ALIAS) @@ -1190,13 +1212,8 @@ class Parser(metaclass=_Parser): extend_props(self._parse_properties()) self._match(TokenType.ALIAS) - - # exp.Properties.Location.POST_ALIAS - if not ( - self._match(TokenType.SELECT, advance=False) - or self._match(TokenType.WITH, advance=False) - or self._match(TokenType.L_PAREN, advance=False) - ): + if not self._match_set(self.DDL_SELECT_TOKENS, advance=False): + # exp.Properties.Location.POST_ALIAS extend_props(self._parse_properties()) expression = self._parse_ddl_select() @@ -1206,7 +1223,7 @@ class Parser(metaclass=_Parser): while True: index = self._parse_index() - # exp.Properties.Location.POST_EXPRESSION or exp.Properties.Location.POST_INDEX + # exp.Properties.Location.POST_EXPRESSION and POST_INDEX extend_props(self._parse_properties()) if not index: @@ -1296,7 +1313,7 @@ class Parser(metaclass=_Parser): return None - def _parse_stored(self) -> exp.Expression: + def _parse_stored(self) -> exp.FileFormatProperty: self._match(TokenType.ALIAS) input_format = self._parse_string() if self._match_text_seq("INPUTFORMAT") else None @@ -1311,14 +1328,13 @@ class Parser(metaclass=_Parser): else self._parse_var_or_string() or self._parse_number() or self._parse_id_var(), ) - def _parse_property_assignment(self, exp_class: t.Type[exp.Expression]) -> exp.Expression: + def _parse_property_assignment(self, exp_class: t.Type[E]) -> E: self._match(TokenType.EQ) self._match(TokenType.ALIAS) return self.expression(exp_class, this=self._parse_field()) - def _parse_properties(self, before: t.Optional[bool] = None) -> t.Optional[exp.Expression]: + def _parse_properties(self, before: t.Optional[bool] = None) -> t.Optional[exp.Properties]: properties = [] - while True: if before: prop = self._parse_property_before() @@ -1335,29 +1351,25 @@ class Parser(metaclass=_Parser): return None - def _parse_fallback(self, no: bool = False) -> exp.Expression: + def _parse_fallback(self, no: bool = False) -> exp.FallbackProperty: return self.expression( exp.FallbackProperty, no=no, protection=self._match_text_seq("PROTECTION") ) - def _parse_volatile_property(self) -> exp.Expression: + def _parse_volatile_property(self) -> exp.VolatileProperty | exp.StabilityProperty: if self._index >= 2: pre_volatile_token = self._tokens[self._index - 2] else: pre_volatile_token = None - if pre_volatile_token and pre_volatile_token.token_type in ( - TokenType.CREATE, - TokenType.REPLACE, - TokenType.UNIQUE, - ): + if pre_volatile_token and pre_volatile_token.token_type in self.PRE_VOLATILE_TOKENS: return exp.VolatileProperty() return self.expression(exp.StabilityProperty, this=exp.Literal.string("VOLATILE")) def _parse_with_property( self, - ) -> t.Union[t.Optional[exp.Expression], t.List[t.Optional[exp.Expression]]]: + ) -> t.Optional[exp.Expression] | t.List[t.Optional[exp.Expression]]: self._match(TokenType.WITH) if self._match(TokenType.L_PAREN, advance=False): return self._parse_wrapped_csv(self._parse_property) @@ -1376,7 +1388,7 @@ class Parser(metaclass=_Parser): return self._parse_withisolatedloading() # https://dev.mysql.com/doc/refman/8.0/en/create-view.html - def _parse_definer(self) -> t.Optional[exp.Expression]: + def _parse_definer(self) -> t.Optional[exp.DefinerProperty]: self._match(TokenType.EQ) user = self._parse_id_var() @@ -1388,18 +1400,18 @@ class Parser(metaclass=_Parser): return exp.DefinerProperty(this=f"{user}@{host}") - def _parse_withjournaltable(self) -> exp.Expression: + def _parse_withjournaltable(self) -> exp.WithJournalTableProperty: 
self._match(TokenType.TABLE) self._match(TokenType.EQ) return self.expression(exp.WithJournalTableProperty, this=self._parse_table_parts()) - def _parse_log(self, no: bool = False) -> exp.Expression: + def _parse_log(self, no: bool = False) -> exp.LogProperty: return self.expression(exp.LogProperty, no=no) - def _parse_journal(self, **kwargs) -> exp.Expression: + def _parse_journal(self, **kwargs) -> exp.JournalProperty: return self.expression(exp.JournalProperty, **kwargs) - def _parse_checksum(self) -> exp.Expression: + def _parse_checksum(self) -> exp.ChecksumProperty: self._match(TokenType.EQ) on = None @@ -1407,53 +1419,47 @@ class Parser(metaclass=_Parser): on = True elif self._match_text_seq("OFF"): on = False - default = self._match(TokenType.DEFAULT) - return self.expression( - exp.ChecksumProperty, - on=on, - default=default, - ) + return self.expression(exp.ChecksumProperty, on=on, default=self._match(TokenType.DEFAULT)) - def _parse_cluster(self) -> t.Optional[exp.Expression]: + def _parse_cluster(self) -> t.Optional[exp.Cluster]: if not self._match_text_seq("BY"): self._retreat(self._index - 1) return None - return self.expression( - exp.Cluster, - expressions=self._parse_csv(self._parse_ordered), - ) - def _parse_freespace(self) -> exp.Expression: + return self.expression(exp.Cluster, expressions=self._parse_csv(self._parse_ordered)) + + def _parse_freespace(self) -> exp.FreespaceProperty: self._match(TokenType.EQ) return self.expression( exp.FreespaceProperty, this=self._parse_number(), percent=self._match(TokenType.PERCENT) ) - def _parse_mergeblockratio(self, no: bool = False, default: bool = False) -> exp.Expression: + def _parse_mergeblockratio( + self, no: bool = False, default: bool = False + ) -> exp.MergeBlockRatioProperty: if self._match(TokenType.EQ): return self.expression( exp.MergeBlockRatioProperty, this=self._parse_number(), percent=self._match(TokenType.PERCENT), ) - return self.expression( - exp.MergeBlockRatioProperty, - no=no, - default=default, - ) + + return self.expression(exp.MergeBlockRatioProperty, no=no, default=default) def _parse_datablocksize( self, default: t.Optional[bool] = None, minimum: t.Optional[bool] = None, maximum: t.Optional[bool] = None, - ) -> exp.Expression: + ) -> exp.DataBlocksizeProperty: self._match(TokenType.EQ) size = self._parse_number() + units = None if self._match_texts(("BYTES", "KBYTES", "KILOBYTES")): units = self._prev.text + return self.expression( exp.DataBlocksizeProperty, size=size, @@ -1463,12 +1469,13 @@ class Parser(metaclass=_Parser): maximum=maximum, ) - def _parse_blockcompression(self) -> exp.Expression: + def _parse_blockcompression(self) -> exp.BlockCompressionProperty: self._match(TokenType.EQ) always = self._match_text_seq("ALWAYS") manual = self._match_text_seq("MANUAL") never = self._match_text_seq("NEVER") default = self._match_text_seq("DEFAULT") + autotemp = None if self._match_text_seq("AUTOTEMP"): autotemp = self._parse_schema() @@ -1482,7 +1489,7 @@ class Parser(metaclass=_Parser): autotemp=autotemp, ) - def _parse_withisolatedloading(self) -> exp.Expression: + def _parse_withisolatedloading(self) -> exp.IsolatedLoadingProperty: no = self._match_text_seq("NO") concurrent = self._match_text_seq("CONCURRENT") self._match_text_seq("ISOLATED", "LOADING") @@ -1498,7 +1505,7 @@ class Parser(metaclass=_Parser): for_none=for_none, ) - def _parse_locking(self) -> exp.Expression: + def _parse_locking(self) -> exp.LockingProperty: if self._match(TokenType.TABLE): kind = "TABLE" elif 
self._match(TokenType.VIEW): @@ -1553,14 +1560,14 @@ class Parser(metaclass=_Parser): return self._parse_csv(self._parse_conjunction) return [] - def _parse_partitioned_by(self) -> exp.Expression: + def _parse_partitioned_by(self) -> exp.PartitionedByProperty: self._match(TokenType.EQ) return self.expression( exp.PartitionedByProperty, this=self._parse_schema() or self._parse_bracket(self._parse_field()), ) - def _parse_withdata(self, no: bool = False) -> exp.Expression: + def _parse_withdata(self, no: bool = False) -> exp.WithDataProperty: if self._match_text_seq("AND", "STATISTICS"): statistics = True elif self._match_text_seq("AND", "NO", "STATISTICS"): @@ -1570,52 +1577,50 @@ class Parser(metaclass=_Parser): return self.expression(exp.WithDataProperty, no=no, statistics=statistics) - def _parse_no_property(self) -> t.Optional[exp.Property]: + def _parse_no_property(self) -> t.Optional[exp.NoPrimaryIndexProperty]: if self._match_text_seq("PRIMARY", "INDEX"): return exp.NoPrimaryIndexProperty() return None - def _parse_on_property(self) -> t.Optional[exp.Property]: + def _parse_on_property(self) -> t.Optional[exp.Expression]: if self._match_text_seq("COMMIT", "PRESERVE", "ROWS"): return exp.OnCommitProperty() elif self._match_text_seq("COMMIT", "DELETE", "ROWS"): return exp.OnCommitProperty(delete=True) return None - def _parse_distkey(self) -> exp.Expression: + def _parse_distkey(self) -> exp.DistKeyProperty: return self.expression(exp.DistKeyProperty, this=self._parse_wrapped(self._parse_id_var)) - def _parse_create_like(self) -> t.Optional[exp.Expression]: + def _parse_create_like(self) -> t.Optional[exp.LikeProperty]: table = self._parse_table(schema=True) + options = [] while self._match_texts(("INCLUDING", "EXCLUDING")): this = self._prev.text.upper() - id_var = self._parse_id_var() + id_var = self._parse_id_var() if not id_var: return None options.append( - self.expression( - exp.Property, - this=this, - value=exp.Var(this=id_var.this.upper()), - ) + self.expression(exp.Property, this=this, value=exp.var(id_var.this.upper())) ) + return self.expression(exp.LikeProperty, this=table, expressions=options) - def _parse_sortkey(self, compound: bool = False) -> exp.Expression: + def _parse_sortkey(self, compound: bool = False) -> exp.SortKeyProperty: return self.expression( - exp.SortKeyProperty, this=self._parse_wrapped_csv(self._parse_id_var), compound=compound + exp.SortKeyProperty, this=self._parse_wrapped_id_vars(), compound=compound ) - def _parse_character_set(self, default: bool = False) -> exp.Expression: + def _parse_character_set(self, default: bool = False) -> exp.CharacterSetProperty: self._match(TokenType.EQ) return self.expression( exp.CharacterSetProperty, this=self._parse_var_or_string(), default=default ) - def _parse_returns(self) -> exp.Expression: + def _parse_returns(self) -> exp.ReturnsProperty: value: t.Optional[exp.Expression] is_table = self._match(TokenType.TABLE) @@ -1629,19 +1634,18 @@ class Parser(metaclass=_Parser): if not self._match(TokenType.GT): self.raise_error("Expecting >") else: - value = self._parse_schema(exp.Var(this="TABLE")) + value = self._parse_schema(exp.var("TABLE")) else: value = self._parse_types() return self.expression(exp.ReturnsProperty, this=value, is_table=is_table) - def _parse_describe(self) -> exp.Expression: + def _parse_describe(self) -> exp.Describe: kind = self._match_set(self.CREATABLES) and self._prev.text this = self._parse_table() - return self.expression(exp.Describe, this=this, kind=kind) - def _parse_insert(self) -> 
exp.Expression: + def _parse_insert(self) -> exp.Insert: overwrite = self._match(TokenType.OVERWRITE) local = self._match_text_seq("LOCAL") alternative = None @@ -1673,11 +1677,11 @@ class Parser(metaclass=_Parser): alternative=alternative, ) - def _parse_on_conflict(self) -> t.Optional[exp.Expression]: + def _parse_on_conflict(self) -> t.Optional[exp.OnConflict]: conflict = self._match_text_seq("ON", "CONFLICT") duplicate = self._match_text_seq("ON", "DUPLICATE", "KEY") - if not (conflict or duplicate): + if not conflict and not duplicate: return None nothing = None @@ -1707,18 +1711,20 @@ class Parser(metaclass=_Parser): constraint=constraint, ) - def _parse_returning(self) -> t.Optional[exp.Expression]: + def _parse_returning(self) -> t.Optional[exp.Returning]: if not self._match(TokenType.RETURNING): return None return self.expression(exp.Returning, expressions=self._parse_csv(self._parse_column)) - def _parse_row(self) -> t.Optional[exp.Expression]: + def _parse_row(self) -> t.Optional[exp.RowFormatSerdeProperty | exp.RowFormatDelimitedProperty]: if not self._match(TokenType.FORMAT): return None return self._parse_row_format() - def _parse_row_format(self, match_row: bool = False) -> t.Optional[exp.Expression]: + def _parse_row_format( + self, match_row: bool = False + ) -> t.Optional[exp.RowFormatSerdeProperty | exp.RowFormatDelimitedProperty]: if match_row and not self._match_pair(TokenType.ROW, TokenType.FORMAT): return None @@ -1744,7 +1750,7 @@ class Parser(metaclass=_Parser): return self.expression(exp.RowFormatDelimitedProperty, **kwargs) # type: ignore - def _parse_load(self) -> exp.Expression: + def _parse_load(self) -> exp.LoadData | exp.Command: if self._match_text_seq("DATA"): local = self._match_text_seq("LOCAL") self._match_text_seq("INPATH") @@ -1764,7 +1770,7 @@ class Parser(metaclass=_Parser): ) return self._parse_as_command(self._prev) - def _parse_delete(self) -> exp.Expression: + def _parse_delete(self) -> exp.Delete: self._match(TokenType.FROM) return self.expression( @@ -1775,7 +1781,7 @@ class Parser(metaclass=_Parser): returning=self._parse_returning(), ) - def _parse_update(self) -> exp.Expression: + def _parse_update(self) -> exp.Update: return self.expression( exp.Update, **{ # type: ignore @@ -1787,22 +1793,20 @@ class Parser(metaclass=_Parser): }, ) - def _parse_uncache(self) -> exp.Expression: + def _parse_uncache(self) -> exp.Uncache: if not self._match(TokenType.TABLE): self.raise_error("Expecting TABLE after UNCACHE") return self.expression( - exp.Uncache, - exists=self._parse_exists(), - this=self._parse_table(schema=True), + exp.Uncache, exists=self._parse_exists(), this=self._parse_table(schema=True) ) - def _parse_cache(self) -> exp.Expression: + def _parse_cache(self) -> exp.Cache: lazy = self._match_text_seq("LAZY") self._match(TokenType.TABLE) table = self._parse_table(schema=True) - options = [] + options = [] if self._match_text_seq("OPTIONS"): self._match_l_paren() k = self._parse_string() @@ -1820,7 +1824,7 @@ class Parser(metaclass=_Parser): expression=self._parse_select(nested=True), ) - def _parse_partition(self) -> t.Optional[exp.Expression]: + def _parse_partition(self) -> t.Optional[exp.Partition]: if not self._match(TokenType.PARTITION): return None @@ -1828,7 +1832,7 @@ class Parser(metaclass=_Parser): exp.Partition, expressions=self._parse_wrapped_csv(self._parse_conjunction) ) - def _parse_value(self) -> exp.Expression: + def _parse_value(self) -> exp.Tuple: if self._match(TokenType.L_PAREN): expressions = 
self._parse_csv(self._parse_conjunction) self._match_r_paren() @@ -1926,7 +1930,7 @@ class Parser(metaclass=_Parser): return self._parse_set_operations(this) - def _parse_with(self, skip_with_token: bool = False) -> t.Optional[exp.Expression]: + def _parse_with(self, skip_with_token: bool = False) -> t.Optional[exp.With]: if not skip_with_token and not self._match(TokenType.WITH): return None @@ -1946,22 +1950,19 @@ class Parser(metaclass=_Parser): exp.With, comments=comments, expressions=expressions, recursive=recursive ) - def _parse_cte(self) -> exp.Expression: + def _parse_cte(self) -> exp.CTE: alias = self._parse_table_alias() if not alias or not alias.this: self.raise_error("Expected CTE to have alias") self._match(TokenType.ALIAS) - return self.expression( - exp.CTE, - this=self._parse_wrapped(self._parse_statement), - alias=alias, + exp.CTE, this=self._parse_wrapped(self._parse_statement), alias=alias ) def _parse_table_alias( self, alias_tokens: t.Optional[t.Collection[TokenType]] = None - ) -> t.Optional[exp.Expression]: + ) -> t.Optional[exp.TableAlias]: any_token = self._match(TokenType.ALIAS) alias = ( self._parse_id_var(any_token=any_token, tokens=alias_tokens or self.TABLE_ALIAS_TOKENS) @@ -1982,9 +1983,10 @@ class Parser(metaclass=_Parser): def _parse_subquery( self, this: t.Optional[exp.Expression], parse_alias: bool = True - ) -> t.Optional[exp.Expression]: + ) -> t.Optional[exp.Subquery]: if not this: return None + return self.expression( exp.Subquery, this=this, @@ -2000,19 +2002,25 @@ class Parser(metaclass=_Parser): expression = parser(self) if expression: + if key == "limit": + offset = expression.args.pop("offset", None) + if offset: + this.set("offset", exp.Offset(expression=offset)) this.set(key, expression) return this - def _parse_hint(self) -> t.Optional[exp.Expression]: + def _parse_hint(self) -> t.Optional[exp.Hint]: if self._match(TokenType.HINT): hints = self._parse_csv(self._parse_function) + if not self._match_pair(TokenType.STAR, TokenType.SLASH): self.raise_error("Expected */ after HINT") + return self.expression(exp.Hint, expressions=hints) return None - def _parse_into(self) -> t.Optional[exp.Expression]: + def _parse_into(self) -> t.Optional[exp.Into]: if not self._match(TokenType.INTO): return None @@ -2039,7 +2047,7 @@ class Parser(metaclass=_Parser): this=self._parse_query_modifiers(this) if modifiers else this, ) - def _parse_match_recognize(self) -> t.Optional[exp.Expression]: + def _parse_match_recognize(self) -> t.Optional[exp.MatchRecognize]: if not self._match(TokenType.MATCH_RECOGNIZE): return None @@ -2052,7 +2060,7 @@ class Parser(metaclass=_Parser): ) if self._match_text_seq("ONE", "ROW", "PER", "MATCH"): - rows = exp.Var(this="ONE ROW PER MATCH") + rows = exp.var("ONE ROW PER MATCH") elif self._match_text_seq("ALL", "ROWS", "PER", "MATCH"): text = "ALL ROWS PER MATCH" if self._match_text_seq("SHOW", "EMPTY", "MATCHES"): @@ -2061,7 +2069,7 @@ class Parser(metaclass=_Parser): text += f" OMIT EMPTY MATCHES" elif self._match_text_seq("WITH", "UNMATCHED", "ROWS"): text += f" WITH UNMATCHED ROWS" - rows = exp.Var(this=text) + rows = exp.var(text) else: rows = None @@ -2075,7 +2083,7 @@ class Parser(metaclass=_Parser): text += f" TO FIRST {self._advance_any().text}" # type: ignore elif self._match_text_seq("TO", "LAST"): text += f" TO LAST {self._advance_any().text}" # type: ignore - after = exp.Var(this=text) + after = exp.var(text) else: after = None @@ -2093,11 +2101,14 @@ class Parser(metaclass=_Parser): paren += 1 if self._curr.token_type 
== TokenType.R_PAREN: paren -= 1 + end = self._prev self._advance() + if paren > 0: self.raise_error("Expecting )", self._curr) - pattern = exp.Var(this=self._find_sql(start, end)) + + pattern = exp.var(self._find_sql(start, end)) else: pattern = None @@ -2127,7 +2138,7 @@ class Parser(metaclass=_Parser): alias=self._parse_table_alias(), ) - def _parse_lateral(self) -> t.Optional[exp.Expression]: + def _parse_lateral(self) -> t.Optional[exp.Lateral]: outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY) cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY) @@ -2150,24 +2161,19 @@ class Parser(metaclass=_Parser): expression=self._parse_function() or self._parse_id_var(any_token=False), ) - table_alias: t.Optional[exp.Expression] - if view: table = self._parse_id_var(any_token=False) columns = self._parse_csv(self._parse_id_var) if self._match(TokenType.ALIAS) else [] - table_alias = self.expression(exp.TableAlias, this=table, columns=columns) + table_alias: t.Optional[exp.TableAlias] = self.expression( + exp.TableAlias, this=table, columns=columns + ) + elif isinstance(this, exp.Subquery) and this.alias: + # Ensures parity between the Subquery's and the Lateral's "alias" args + table_alias = this.args["alias"].copy() else: table_alias = self._parse_table_alias() - expression = self.expression( - exp.Lateral, - this=this, - view=view, - outer=outer, - alias=table_alias, - ) - - return expression + return self.expression(exp.Lateral, this=this, view=view, outer=outer, alias=table_alias) def _parse_join_parts( self, @@ -2178,7 +2184,7 @@ class Parser(metaclass=_Parser): self._match_set(self.JOIN_KINDS) and self._prev, ) - def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Expression]: + def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Join]: if self._match(TokenType.COMMA): return self.expression(exp.Join, this=self._parse_table()) @@ -2223,7 +2229,7 @@ class Parser(metaclass=_Parser): def _parse_index( self, index: t.Optional[exp.Expression] = None, - ) -> t.Optional[exp.Expression]: + ) -> t.Optional[exp.Index]: if index: unique = None primary = None @@ -2236,11 +2242,15 @@ class Parser(metaclass=_Parser): unique = self._match(TokenType.UNIQUE) primary = self._match_text_seq("PRIMARY") amp = self._match_text_seq("AMP") + if not self._match(TokenType.INDEX): return None + index = self._parse_id_var() table = None + using = self._parse_field() if self._match(TokenType.USING) else None + if self._match(TokenType.L_PAREN, advance=False): columns = self._parse_wrapped_csv(self._parse_ordered) else: @@ -2250,6 +2260,7 @@ class Parser(metaclass=_Parser): exp.Index, this=index, table=table, + using=using, columns=columns, unique=unique, primary=primary, @@ -2259,7 +2270,7 @@ class Parser(metaclass=_Parser): def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: return ( - (not schema and self._parse_function()) + (not schema and self._parse_function(optional_parens=False)) or self._parse_id_var(any_token=False) or self._parse_string_as_identifier() or self._parse_placeholder() @@ -2314,7 +2325,7 @@ class Parser(metaclass=_Parser): if schema: return self._parse_schema(this=this) - if self.alias_post_tablesample: + if self.ALIAS_POST_TABLESAMPLE: table_sample = self._parse_table_sample() alias = self._parse_table_alias(alias_tokens=alias_tokens or self.TABLE_ALIAS_TOKENS) @@ -2331,7 +2342,7 @@ class Parser(metaclass=_Parser): ) self._match_r_paren() - if not self.alias_post_tablesample: + if not 
self.ALIAS_POST_TABLESAMPLE: table_sample = self._parse_table_sample() if table_sample: @@ -2340,46 +2351,47 @@ class Parser(metaclass=_Parser): return this - def _parse_unnest(self) -> t.Optional[exp.Expression]: + def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]: if not self._match(TokenType.UNNEST): return None expressions = self._parse_wrapped_csv(self._parse_type) ordinality = self._match_pair(TokenType.WITH, TokenType.ORDINALITY) - alias = self._parse_table_alias() - if alias and self.unnest_column_only: + alias = self._parse_table_alias() if with_alias else None + + if alias and self.UNNEST_COLUMN_ONLY: if alias.args.get("columns"): self.raise_error("Unexpected extra column alias in unnest.") + alias.set("columns", [alias.this]) alias.set("this", None) offset = None if self._match_pair(TokenType.WITH, TokenType.OFFSET): self._match(TokenType.ALIAS) - offset = self._parse_id_var() or exp.Identifier(this="offset") + offset = self._parse_id_var() or exp.to_identifier("offset") return self.expression( - exp.Unnest, - expressions=expressions, - ordinality=ordinality, - alias=alias, - offset=offset, + exp.Unnest, expressions=expressions, ordinality=ordinality, alias=alias, offset=offset ) - def _parse_derived_table_values(self) -> t.Optional[exp.Expression]: + def _parse_derived_table_values(self) -> t.Optional[exp.Values]: is_derived = self._match_pair(TokenType.L_PAREN, TokenType.VALUES) if not is_derived and not self._match(TokenType.VALUES): return None expressions = self._parse_csv(self._parse_value) + alias = self._parse_table_alias() if is_derived: self._match_r_paren() - return self.expression(exp.Values, expressions=expressions, alias=self._parse_table_alias()) + return self.expression( + exp.Values, expressions=expressions, alias=alias or self._parse_table_alias() + ) - def _parse_table_sample(self, as_modifier: bool = False) -> t.Optional[exp.Expression]: + def _parse_table_sample(self, as_modifier: bool = False) -> t.Optional[exp.TableSample]: if not self._match(TokenType.TABLE_SAMPLE) and not ( as_modifier and self._match_text_seq("USING", "SAMPLE") ): @@ -2456,7 +2468,7 @@ class Parser(metaclass=_Parser): exp.Pivot, this=this, expressions=expressions, using=using, group=group ) - def _parse_pivot(self) -> t.Optional[exp.Expression]: + def _parse_pivot(self) -> t.Optional[exp.Pivot]: index = self._index if self._match(TokenType.PIVOT): @@ -2519,7 +2531,7 @@ class Parser(metaclass=_Parser): def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: return [agg.alias for agg in aggregations] - def _parse_where(self, skip_where_token: bool = False) -> t.Optional[exp.Expression]: + def _parse_where(self, skip_where_token: bool = False) -> t.Optional[exp.Where]: if not skip_where_token and not self._match(TokenType.WHERE): return None @@ -2527,7 +2539,7 @@ class Parser(metaclass=_Parser): exp.Where, comments=self._prev_comments, this=self._parse_conjunction() ) - def _parse_group(self, skip_group_by_token: bool = False) -> t.Optional[exp.Expression]: + def _parse_group(self, skip_group_by_token: bool = False) -> t.Optional[exp.Group]: if not skip_group_by_token and not self._match(TokenType.GROUP_BY): return None @@ -2578,12 +2590,12 @@ class Parser(metaclass=_Parser): return self._parse_column() - def _parse_having(self, skip_having_token: bool = False) -> t.Optional[exp.Expression]: + def _parse_having(self, skip_having_token: bool = False) -> t.Optional[exp.Having]: if not skip_having_token and not 
self._match(TokenType.HAVING): return None return self.expression(exp.Having, this=self._parse_conjunction()) - def _parse_qualify(self) -> t.Optional[exp.Expression]: + def _parse_qualify(self) -> t.Optional[exp.Qualify]: if not self._match(TokenType.QUALIFY): return None return self.expression(exp.Qualify, this=self._parse_conjunction()) @@ -2598,16 +2610,15 @@ class Parser(metaclass=_Parser): exp.Order, this=this, expressions=self._parse_csv(self._parse_ordered) ) - def _parse_sort( - self, exp_class: t.Type[exp.Expression], *texts: str - ) -> t.Optional[exp.Expression]: + def _parse_sort(self, exp_class: t.Type[E], *texts: str) -> t.Optional[E]: if not self._match_text_seq(*texts): return None return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered)) - def _parse_ordered(self) -> exp.Expression: + def _parse_ordered(self) -> exp.Ordered: this = self._parse_conjunction() self._match(TokenType.ASC) + is_desc = self._match(TokenType.DESC) is_nulls_first = self._match_text_seq("NULLS", "FIRST") is_nulls_last = self._match_text_seq("NULLS", "LAST") @@ -2615,13 +2626,14 @@ class Parser(metaclass=_Parser): asc = not desc nulls_first = is_nulls_first or False explicitly_null_ordered = is_nulls_first or is_nulls_last + if ( not explicitly_null_ordered and ( - (asc and self.null_ordering == "nulls_are_small") - or (desc and self.null_ordering != "nulls_are_small") + (asc and self.NULL_ORDERING == "nulls_are_small") + or (desc and self.NULL_ORDERING != "nulls_are_small") ) - and self.null_ordering != "nulls_are_last" + and self.NULL_ORDERING != "nulls_are_last" ): nulls_first = True @@ -2632,9 +2644,15 @@ class Parser(metaclass=_Parser): ) -> t.Optional[exp.Expression]: if self._match(TokenType.TOP if top else TokenType.LIMIT): limit_paren = self._match(TokenType.L_PAREN) - limit_exp = self.expression( - exp.Limit, this=this, expression=self._parse_number() if top else self._parse_term() - ) + expression = self._parse_number() if top else self._parse_term() + + if self._match(TokenType.COMMA): + offset = expression + expression = self._parse_term() + else: + offset = None + + limit_exp = self.expression(exp.Limit, this=this, expression=expression, offset=offset) if limit_paren: self._match_r_paren() @@ -2667,17 +2685,15 @@ class Parser(metaclass=_Parser): return this def _parse_offset(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]: - if not self._match_set((TokenType.OFFSET, TokenType.COMMA)): + if not self._match(TokenType.OFFSET): return this count = self._parse_number() self._match_set((TokenType.ROW, TokenType.ROWS)) return self.expression(exp.Offset, this=this, expression=count) - def _parse_locks(self) -> t.List[exp.Expression]: - # Lists are invariant, so we need to use a type hint here - locks: t.List[exp.Expression] = [] - + def _parse_locks(self) -> t.List[exp.Lock]: + locks = [] while True: if self._match_text_seq("FOR", "UPDATE"): update = True @@ -2768,6 +2784,7 @@ class Parser(metaclass=_Parser): def _parse_is(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: index = self._index - 1 negate = self._match(TokenType.NOT) + if self._match_text_seq("DISTINCT", "FROM"): klass = exp.NullSafeEQ if negate else exp.NullSafeNEQ return self.expression(klass, this=this, expression=self._parse_expression()) @@ -2781,7 +2798,7 @@ class Parser(metaclass=_Parser): return self.expression(exp.Not, this=this) if negate else this def _parse_in(self, this: t.Optional[exp.Expression], alias: bool = False) -> exp.In: - unnest = 
self._parse_unnest() + unnest = self._parse_unnest(with_alias=False) if unnest: this = self.expression(exp.In, this=this, unnest=unnest) elif self._match(TokenType.L_PAREN): @@ -2798,7 +2815,7 @@ class Parser(metaclass=_Parser): return this - def _parse_between(self, this: exp.Expression) -> exp.Expression: + def _parse_between(self, this: exp.Expression) -> exp.Between: low = self._parse_bitwise() self._match(TokenType.AND) high = self._parse_bitwise() @@ -2809,7 +2826,7 @@ class Parser(metaclass=_Parser): return this return self.expression(exp.Escape, this=this, expression=self._parse_string()) - def _parse_interval(self) -> t.Optional[exp.Expression]: + def _parse_interval(self) -> t.Optional[exp.Interval]: if not self._match(TokenType.INTERVAL): return None @@ -2840,9 +2857,7 @@ class Parser(metaclass=_Parser): while True: if self._match_set(self.BITWISE): this = self.expression( - self.BITWISE[self._prev.token_type], - this=this, - expression=self._parse_term(), + self.BITWISE[self._prev.token_type], this=this, expression=self._parse_term() ) elif self._match_pair(TokenType.LT, TokenType.LT): this = self.expression( @@ -2890,7 +2905,7 @@ class Parser(metaclass=_Parser): return this - def _parse_type_size(self) -> t.Optional[exp.Expression]: + def _parse_type_size(self) -> t.Optional[exp.DataTypeSize]: this = self._parse_type() if not this: return None @@ -2926,6 +2941,8 @@ class Parser(metaclass=_Parser): expressions = self._parse_csv( lambda: self._parse_types(check_func=check_func, schema=schema) ) + elif type_token in self.ENUM_TYPE_TOKENS: + expressions = self._parse_csv(self._parse_primary) else: expressions = self._parse_csv(self._parse_type_size) @@ -2943,11 +2960,7 @@ class Parser(metaclass=_Parser): ) while self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET): - this = exp.DataType( - this=exp.DataType.Type.ARRAY, - expressions=[this], - nested=True, - ) + this = exp.DataType(this=exp.DataType.Type.ARRAY, expressions=[this], nested=True) return this @@ -2973,23 +2986,14 @@ class Parser(metaclass=_Parser): value: t.Optional[exp.Expression] = None if type_token in self.TIMESTAMPS: - if self._match_text_seq("WITH", "TIME", "ZONE") or type_token == TokenType.TIMESTAMPTZ: + if self._match_text_seq("WITH", "TIME", "ZONE"): + maybe_func = False value = exp.DataType(this=exp.DataType.Type.TIMESTAMPTZ, expressions=expressions) - elif ( - self._match_text_seq("WITH", "LOCAL", "TIME", "ZONE") - or type_token == TokenType.TIMESTAMPLTZ - ): + elif self._match_text_seq("WITH", "LOCAL", "TIME", "ZONE"): + maybe_func = False value = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions) elif self._match_text_seq("WITHOUT", "TIME", "ZONE"): - if type_token == TokenType.TIME: - value = exp.DataType(this=exp.DataType.Type.TIME, expressions=expressions) - else: - value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions) - - maybe_func = maybe_func and value is None - - if value is None: - value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions) + maybe_func = False elif type_token == TokenType.INTERVAL: unit = self._parse_var() @@ -3037,7 +3041,7 @@ class Parser(metaclass=_Parser): return self._parse_bracket(this) return self._parse_column_ops(this) - def _parse_column_ops(self, this: exp.Expression) -> exp.Expression: + def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: this = self._parse_bracket(this) while self._match_set(self.COLUMN_OPERATORS): @@ -3057,7 +3061,7 @@ class 
Parser(metaclass=_Parser): else exp.Literal.string(value) ) else: - field = self._parse_field(anonymous_func=True) + field = self._parse_field(anonymous_func=True, any_token=True) if isinstance(field, exp.Func): # bigquery allows function calls like x.y.count(...) @@ -3089,8 +3093,10 @@ class Parser(metaclass=_Parser): expressions = [primary] while self._match(TokenType.STRING): expressions.append(exp.Literal.string(self._prev.text)) + if len(expressions) > 1: return self.expression(exp.Concat, expressions=expressions) + return primary if self._match_pair(TokenType.DOT, TokenType.NUMBER): @@ -3118,8 +3124,8 @@ class Parser(metaclass=_Parser): if this: this.add_comments(comments) - self._match_r_paren(expression=this) + self._match_r_paren(expression=this) return this return None @@ -3137,18 +3143,21 @@ class Parser(metaclass=_Parser): ) def _parse_function( - self, functions: t.Optional[t.Dict[str, t.Callable]] = None, anonymous: bool = False + self, + functions: t.Optional[t.Dict[str, t.Callable]] = None, + anonymous: bool = False, + optional_parens: bool = True, ) -> t.Optional[exp.Expression]: if not self._curr: return None token_type = self._curr.token_type - if self._match_set(self.NO_PAREN_FUNCTION_PARSERS): + if optional_parens and self._match_set(self.NO_PAREN_FUNCTION_PARSERS): return self.NO_PAREN_FUNCTION_PARSERS[token_type](self) if not self._next or self._next.token_type != TokenType.L_PAREN: - if token_type in self.NO_PAREN_FUNCTIONS: + if optional_parens and token_type in self.NO_PAREN_FUNCTIONS: self._advance() return self.expression(self.NO_PAREN_FUNCTIONS[token_type]) @@ -3182,8 +3191,7 @@ class Parser(metaclass=_Parser): args = self._parse_csv(lambda: self._parse_lambda(alias=alias)) if function and not anonymous: - this = function(args) - self.validate_expression(this, args) + this = self.validate_expression(function(args), args) else: this = self.expression(exp.Anonymous, this=this, expressions=args) @@ -3210,14 +3218,14 @@ class Parser(metaclass=_Parser): exp.UserDefinedFunction, this=this, expressions=expressions, wrapped=True ) - def _parse_introducer(self, token: Token) -> t.Optional[exp.Expression]: + def _parse_introducer(self, token: Token) -> exp.Introducer | exp.Identifier: literal = self._parse_primary() if literal: return self.expression(exp.Introducer, this=token.text, expression=literal) return self.expression(exp.Identifier, this=token.text) - def _parse_session_parameter(self) -> exp.Expression: + def _parse_session_parameter(self) -> exp.SessionParameter: kind = None this = self._parse_id_var() or self._parse_primary() @@ -3255,7 +3263,7 @@ class Parser(metaclass=_Parser): if isinstance(this, exp.EQ): left = this.this if isinstance(left, exp.Column): - left.replace(exp.Var(this=left.text("this"))) + left.replace(exp.var(left.text("this"))) return self._parse_limit(self._parse_order(self._parse_respect_or_ignore_nulls(this))) @@ -3279,6 +3287,7 @@ class Parser(metaclass=_Parser): lambda: self._parse_constraint() or self._parse_column_def(self._parse_field(any_token=True)) ) + self._match_r_paren() return self.expression(exp.Schema, this=this, expressions=args) @@ -3286,6 +3295,7 @@ class Parser(metaclass=_Parser): # column defs are not really columns, they're identifiers if isinstance(this, exp.Column): this = this.this + kind = self._parse_types(schema=True) if self._match_text_seq("FOR", "ORDINALITY"): @@ -3303,7 +3313,9 @@ class Parser(metaclass=_Parser): return self.expression(exp.ColumnDef, this=this, kind=kind, constraints=constraints) - def 
_parse_auto_increment(self) -> exp.Expression: + def _parse_auto_increment( + self, + ) -> exp.GeneratedAsIdentityColumnConstraint | exp.AutoIncrementColumnConstraint: start = None increment = None @@ -3321,7 +3333,7 @@ class Parser(metaclass=_Parser): return exp.AutoIncrementColumnConstraint() - def _parse_compress(self) -> exp.Expression: + def _parse_compress(self) -> exp.CompressColumnConstraint: if self._match(TokenType.L_PAREN, advance=False): return self.expression( exp.CompressColumnConstraint, this=self._parse_wrapped_csv(self._parse_bitwise) @@ -3329,7 +3341,7 @@ class Parser(metaclass=_Parser): return self.expression(exp.CompressColumnConstraint, this=self._parse_bitwise()) - def _parse_generated_as_identity(self) -> exp.Expression: + def _parse_generated_as_identity(self) -> exp.GeneratedAsIdentityColumnConstraint: if self._match_text_seq("BY", "DEFAULT"): on_null = self._match_pair(TokenType.ON, TokenType.NULL) this = self.expression( @@ -3364,11 +3376,13 @@ class Parser(metaclass=_Parser): return this - def _parse_inline(self) -> t.Optional[exp.Expression]: + def _parse_inline(self) -> exp.InlineLengthColumnConstraint: self._match_text_seq("LENGTH") return self.expression(exp.InlineLengthColumnConstraint, this=self._parse_bitwise()) - def _parse_not_constraint(self) -> t.Optional[exp.Expression]: + def _parse_not_constraint( + self, + ) -> t.Optional[exp.NotNullColumnConstraint | exp.CaseSpecificColumnConstraint]: if self._match_text_seq("NULL"): return self.expression(exp.NotNullColumnConstraint) if self._match_text_seq("CASESPECIFIC"): @@ -3417,7 +3431,7 @@ class Parser(metaclass=_Parser): return self.CONSTRAINT_PARSERS[constraint](self) - def _parse_unique(self) -> exp.Expression: + def _parse_unique(self) -> exp.UniqueColumnConstraint: self._match_text_seq("KEY") return self.expression( exp.UniqueColumnConstraint, this=self._parse_schema(self._parse_id_var(any_token=False)) @@ -3460,7 +3474,7 @@ class Parser(metaclass=_Parser): return options - def _parse_references(self, match: bool = True) -> t.Optional[exp.Expression]: + def _parse_references(self, match: bool = True) -> t.Optional[exp.Reference]: if match and not self._match(TokenType.REFERENCES): return None @@ -3473,7 +3487,7 @@ class Parser(metaclass=_Parser): options = self._parse_key_constraint_options() return self.expression(exp.Reference, this=this, expressions=expressions, options=options) - def _parse_foreign_key(self) -> exp.Expression: + def _parse_foreign_key(self) -> exp.ForeignKey: expressions = self._parse_wrapped_id_vars() reference = self._parse_references() options = {} @@ -3501,7 +3515,7 @@ class Parser(metaclass=_Parser): def _parse_primary_key( self, wrapped_optional: bool = False, in_props: bool = False - ) -> exp.Expression: + ) -> exp.PrimaryKeyColumnConstraint | exp.PrimaryKey: desc = ( self._match_set((TokenType.ASC, TokenType.DESC)) and self._prev.token_type == TokenType.DESC @@ -3514,15 +3528,7 @@ class Parser(metaclass=_Parser): options = self._parse_key_constraint_options() return self.expression(exp.PrimaryKey, expressions=expressions, options=options) - @t.overload - def _parse_bracket(self, this: exp.Expression) -> exp.Expression: - ... - - @t.overload def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: - ... 
- - def _parse_bracket(self, this): if not self._match_set((TokenType.L_BRACKET, TokenType.L_BRACE)): return this @@ -3541,7 +3547,7 @@ class Parser(metaclass=_Parser): elif not this or this.name.upper() == "ARRAY": this = self.expression(exp.Array, expressions=expressions) else: - expressions = apply_index_offset(this, expressions, -self.index_offset) + expressions = apply_index_offset(this, expressions, -self.INDEX_OFFSET) this = self.expression(exp.Bracket, this=this, expressions=expressions) if not self._match(TokenType.R_BRACKET) and bracket_kind == TokenType.L_BRACKET: @@ -3582,8 +3588,7 @@ class Parser(metaclass=_Parser): def _parse_if(self) -> t.Optional[exp.Expression]: if self._match(TokenType.L_PAREN): args = self._parse_csv(self._parse_conjunction) - this = exp.If.from_arg_list(args) - self.validate_expression(this, args) + this = self.validate_expression(exp.If.from_arg_list(args), args) self._match_r_paren() else: index = self._index - 1 @@ -3601,7 +3606,7 @@ class Parser(metaclass=_Parser): return self._parse_window(this) - def _parse_extract(self) -> exp.Expression: + def _parse_extract(self) -> exp.Extract: this = self._parse_function() or self._parse_var() or self._parse_type() if self._match(TokenType.FROM): @@ -3630,9 +3635,37 @@ class Parser(metaclass=_Parser): elif to.this == exp.DataType.Type.CHAR: if self._match(TokenType.CHARACTER_SET): to = self.expression(exp.CharacterSet, this=self._parse_var_or_string()) + elif to.this in exp.DataType.TEMPORAL_TYPES and self._match(TokenType.FORMAT): + fmt = self._parse_string() + + return self.expression( + exp.StrToDate if to.this == exp.DataType.Type.DATE else exp.StrToTime, + this=this, + format=exp.Literal.string( + format_time( + fmt.this if fmt else "", + self.FORMAT_MAPPING or self.TIME_MAPPING, + self.FORMAT_TRIE or self.TIME_TRIE, + ) + ), + ) return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to) + def _parse_concat(self) -> t.Optional[exp.Expression]: + args = self._parse_csv(self._parse_conjunction) + if self.CONCAT_NULL_OUTPUTS_STRING: + args = [exp.func("COALESCE", arg, exp.Literal.string("")) for arg in args] + + # Some dialects (e.g. Trino) don't allow a single-argument CONCAT call, so when + # we find such a call we replace it with its argument. + if len(args) == 1: + return args[0] + + return self.expression( + exp.Concat if self.STRICT_STRING_CONCAT else exp.SafeConcat, expressions=args + ) + def _parse_string_agg(self) -> exp.Expression: expression: t.Optional[exp.Expression] @@ -3654,9 +3687,7 @@ class Parser(metaclass=_Parser): # the STRING_AGG call is parsed like in MySQL / SQLite and can thus be transpiled more easily to them. 
if not self._match_text_seq("WITHIN", "GROUP"): self._retreat(index) - this = exp.GroupConcat.from_arg_list(args) - self.validate_expression(this, args) - return this + return self.validate_expression(exp.GroupConcat.from_arg_list(args), args) self._match_l_paren() # The corresponding match_r_paren will be called in parse_function (caller) order = self._parse_order(this=expression) @@ -3679,7 +3710,7 @@ class Parser(metaclass=_Parser): return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to) - def _parse_decode(self) -> t.Optional[exp.Expression]: + def _parse_decode(self) -> t.Optional[exp.Decode | exp.Case]: """ There are generally two variants of the DECODE function: @@ -3726,18 +3757,20 @@ class Parser(metaclass=_Parser): return exp.Case(ifs=ifs, default=expressions[-1] if len(expressions) % 2 == 1 else None) - def _parse_json_key_value(self) -> t.Optional[exp.Expression]: + def _parse_json_key_value(self) -> t.Optional[exp.JSONKeyValue]: self._match_text_seq("KEY") key = self._parse_field() self._match(TokenType.COLON) self._match_text_seq("VALUE") value = self._parse_field() + if not key and not value: return None return self.expression(exp.JSONKeyValue, this=key, expression=value) - def _parse_json_object(self) -> exp.Expression: - expressions = self._parse_csv(self._parse_json_key_value) + def _parse_json_object(self) -> exp.JSONObject: + star = self._parse_star() + expressions = [star] if star else self._parse_csv(self._parse_json_key_value) null_handling = None if self._match_text_seq("NULL", "ON", "NULL"): @@ -3767,7 +3800,7 @@ class Parser(metaclass=_Parser): encoding=encoding, ) - def _parse_logarithm(self) -> exp.Expression: + def _parse_logarithm(self) -> exp.Func: # Default argument order is base, expression args = self._parse_csv(self._parse_range) @@ -3780,7 +3813,7 @@ class Parser(metaclass=_Parser): exp.Ln if self.LOG_DEFAULTS_TO_LN else exp.Log, this=seq_get(args, 0) ) - def _parse_match_against(self) -> exp.Expression: + def _parse_match_against(self) -> exp.MatchAgainst: expressions = self._parse_csv(self._parse_column) self._match_text_seq(")", "AGAINST", "(") @@ -3803,15 +3836,16 @@ class Parser(metaclass=_Parser): ) # https://learn.microsoft.com/en-us/sql/t-sql/functions/openjson-transact-sql?view=sql-server-ver16 - def _parse_open_json(self) -> exp.Expression: + def _parse_open_json(self) -> exp.OpenJSON: this = self._parse_bitwise() path = self._match(TokenType.COMMA) and self._parse_string() - def _parse_open_json_column_def() -> exp.Expression: + def _parse_open_json_column_def() -> exp.OpenJSONColumnDef: this = self._parse_field(any_token=True) kind = self._parse_types() path = self._parse_string() as_json = self._match_pair(TokenType.ALIAS, TokenType.JSON) + return self.expression( exp.OpenJSONColumnDef, this=this, kind=kind, path=path, as_json=as_json ) @@ -3823,7 +3857,7 @@ class Parser(metaclass=_Parser): return self.expression(exp.OpenJSON, this=this, path=path, expressions=expressions) - def _parse_position(self, haystack_first: bool = False) -> exp.Expression: + def _parse_position(self, haystack_first: bool = False) -> exp.StrPosition: args = self._parse_csv(self._parse_bitwise) if self._match(TokenType.IN): @@ -3838,17 +3872,15 @@ class Parser(metaclass=_Parser): needle = seq_get(args, 0) haystack = seq_get(args, 1) - this = exp.StrPosition(this=haystack, substr=needle, position=seq_get(args, 2)) - - self.validate_expression(this, args) - - return this + return self.expression( + exp.StrPosition, this=haystack, substr=needle, 
position=seq_get(args, 2) + ) - def _parse_join_hint(self, func_name: str) -> exp.Expression: + def _parse_join_hint(self, func_name: str) -> exp.JoinHint: args = self._parse_csv(self._parse_table) return exp.JoinHint(this=func_name.upper(), expressions=args) - def _parse_substring(self) -> exp.Expression: + def _parse_substring(self) -> exp.Substring: # Postgres supports the form: substring(string [from int] [for int]) # https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6 @@ -3859,12 +3891,9 @@ class Parser(metaclass=_Parser): if self._match(TokenType.FOR): args.append(self._parse_bitwise()) - this = exp.Substring.from_arg_list(args) - self.validate_expression(this, args) - - return this + return self.validate_expression(exp.Substring.from_arg_list(args), args) - def _parse_trim(self) -> exp.Expression: + def _parse_trim(self) -> exp.Trim: # https://www.w3resource.com/sql/character-functions/trim.php # https://docs.oracle.com/javadb/10.8.3.0/ref/rreftrimfunc.html @@ -3885,11 +3914,7 @@ class Parser(metaclass=_Parser): collation = self._parse_bitwise() return self.expression( - exp.Trim, - this=this, - position=position, - expression=expression, - collation=collation, + exp.Trim, this=this, position=position, expression=expression, collation=collation ) def _parse_window_clause(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]: @@ -4047,7 +4072,7 @@ class Parser(metaclass=_Parser): return self.PRIMARY_PARSERS[TokenType.STRING](self, self._prev) return self._parse_placeholder() - def _parse_string_as_identifier(self) -> t.Optional[exp.Expression]: + def _parse_string_as_identifier(self) -> t.Optional[exp.Identifier]: return exp.to_identifier(self._match(TokenType.STRING) and self._prev.text, quoted=True) def _parse_number(self) -> t.Optional[exp.Expression]: @@ -4097,7 +4122,7 @@ class Parser(metaclass=_Parser): return self.PRIMARY_PARSERS[TokenType.STAR](self, self._prev) return None - def _parse_parameter(self) -> exp.Expression: + def _parse_parameter(self) -> exp.Parameter: wrapped = self._match(TokenType.L_BRACE) this = self._parse_var() or self._parse_identifier() or self._parse_primary() self._match(TokenType.R_BRACE) @@ -4183,7 +4208,7 @@ class Parser(metaclass=_Parser): self._parse_set_operations(self._parse_select(nested=True, parse_subquery_alias=False)) ) - def _parse_transaction(self) -> exp.Expression: + def _parse_transaction(self) -> exp.Transaction: this = None if self._match_texts(self.TRANSACTION_KIND): this = self._prev.text @@ -4203,7 +4228,7 @@ class Parser(metaclass=_Parser): return self.expression(exp.Transaction, this=this, modes=modes) - def _parse_commit_or_rollback(self) -> exp.Expression: + def _parse_commit_or_rollback(self) -> exp.Commit | exp.Rollback: chain = None savepoint = None is_rollback = self._prev.token_type == TokenType.ROLLBACK @@ -4220,6 +4245,7 @@ class Parser(metaclass=_Parser): if is_rollback: return self.expression(exp.Rollback, savepoint=savepoint) + return self.expression(exp.Commit, chain=chain) def _parse_add_column(self) -> t.Optional[exp.Expression]: @@ -4243,19 +4269,19 @@ class Parser(metaclass=_Parser): return expression - def _parse_drop_column(self) -> t.Optional[exp.Expression]: + def _parse_drop_column(self) -> t.Optional[exp.Drop | exp.Command]: drop = self._match(TokenType.DROP) and self._parse_drop() if drop and not isinstance(drop, exp.Command): drop.set("kind", drop.args.get("kind", "COLUMN")) return drop # https://docs.aws.amazon.com/athena/latest/ug/alter-table-drop-partition.html - def 
_parse_drop_partition(self, exists: t.Optional[bool] = None) -> exp.Expression: + def _parse_drop_partition(self, exists: t.Optional[bool] = None) -> exp.DropPartition: return self.expression( exp.DropPartition, expressions=self._parse_csv(self._parse_partition), exists=exists ) - def _parse_add_constraint(self) -> t.Optional[exp.Expression]: + def _parse_add_constraint(self) -> exp.AddConstraint: this = None kind = self._prev.token_type @@ -4288,7 +4314,7 @@ class Parser(metaclass=_Parser): self._retreat(index) return self._parse_csv(self._parse_add_column) - def _parse_alter_table_alter(self) -> exp.Expression: + def _parse_alter_table_alter(self) -> exp.AlterColumn: self._match(TokenType.COLUMN) column = self._parse_field(any_token=True) @@ -4316,11 +4342,11 @@ class Parser(metaclass=_Parser): self._retreat(index) return self._parse_csv(self._parse_drop_column) - def _parse_alter_table_rename(self) -> exp.Expression: + def _parse_alter_table_rename(self) -> exp.RenameTable: self._match_text_seq("TO") return self.expression(exp.RenameTable, this=self._parse_table(schema=True)) - def _parse_alter(self) -> t.Optional[exp.Expression]: + def _parse_alter(self) -> exp.AlterTable | exp.Command: start = self._prev if not self._match(TokenType.TABLE): @@ -4345,7 +4371,7 @@ class Parser(metaclass=_Parser): ) return self._parse_as_command(start) - def _parse_merge(self) -> exp.Expression: + def _parse_merge(self) -> exp.Merge: self._match(TokenType.INTO) target = self._parse_table() @@ -4412,7 +4438,7 @@ class Parser(metaclass=_Parser): ) def _parse_show(self) -> t.Optional[exp.Expression]: - parser = self._find_parser(self.SHOW_PARSERS, self._show_trie) # type: ignore + parser = self._find_parser(self.SHOW_PARSERS, self.SHOW_TRIE) if parser: return parser(self) self._advance() @@ -4433,17 +4459,9 @@ class Parser(metaclass=_Parser): return None right = self._parse_statement() or self._parse_id_var() - this = self.expression( - exp.EQ, - this=left, - expression=right, - ) + this = self.expression(exp.EQ, this=left, expression=right) - return self.expression( - exp.SetItem, - this=this, - kind=kind, - ) + return self.expression(exp.SetItem, this=this, kind=kind) def _parse_set_transaction(self, global_: bool = False) -> exp.Expression: self._match_text_seq("TRANSACTION") @@ -4458,10 +4476,10 @@ class Parser(metaclass=_Parser): ) def _parse_set_item(self) -> t.Optional[exp.Expression]: - parser = self._find_parser(self.SET_PARSERS, self._set_trie) # type: ignore + parser = self._find_parser(self.SET_PARSERS, self.SET_TRIE) return parser(self) if parser else self._parse_set_item_assignment(kind=None) - def _parse_set(self) -> exp.Expression: + def _parse_set(self) -> exp.Set | exp.Command: index = self._index set_ = self.expression(exp.Set, expressions=self._parse_csv(self._parse_set_item)) @@ -4471,10 +4489,10 @@ class Parser(metaclass=_Parser): return set_ - def _parse_var_from_options(self, options: t.Collection[str]) -> t.Optional[exp.Expression]: + def _parse_var_from_options(self, options: t.Collection[str]) -> t.Optional[exp.Var]: for option in options: if self._match_text_seq(*option.split(" ")): - return exp.Var(this=option) + return exp.var(option) return None def _parse_as_command(self, start: Token) -> exp.Command: diff --git a/sqlglot/planner.py b/sqlglot/planner.py index eccad35..4ed7449 100644 --- a/sqlglot/planner.py +++ b/sqlglot/planner.py @@ -302,7 +302,7 @@ class Join(Step): for join in joins: source_key, join_key, condition = join_condition(join) - 
step.joins[join.this.alias_or_name] = { + step.joins[join.alias_or_name] = { "side": join.side, # type: ignore "join_key": join_key, "source_key": source_key, diff --git a/sqlglot/schema.py b/sqlglot/schema.py index f1c4a09..f73adee 100644 --- a/sqlglot/schema.py +++ b/sqlglot/schema.py @@ -285,8 +285,6 @@ class MappingSchema(AbstractMappingSchema[t.Dict[str, str]], Schema): elif isinstance(column_type, str): return self._to_data_type(column_type.upper(), dialect=dialect) - raise SchemaError(f"Unknown column type '{column_type}'") - return exp.DataType.build("unknown") def _normalize(self, schema: t.Dict) -> t.Dict: diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index a30ec24..42628b9 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -144,6 +144,7 @@ class TokenType(AutoName): VARIANT = auto() OBJECT = auto() INET = auto() + ENUM = auto() # keywords ALIAS = auto() @@ -346,6 +347,7 @@ class Token: col: The column that the token ends on. start: The start index of the token. end: The ending index of the token. + comments: The comments to attach to the token. """ self.token_type = token_type self.text = text @@ -391,12 +393,15 @@ class _Tokenizer(type): klass._STRING_ESCAPES = set(klass.STRING_ESCAPES) klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES) - klass._COMMENTS = dict( - (comment, None) if isinstance(comment, str) else (comment[0], comment[1]) - for comment in klass.COMMENTS - ) + klass._COMMENTS = { + **dict( + (comment, None) if isinstance(comment, str) else (comment[0], comment[1]) + for comment in klass.COMMENTS + ), + "{#": "#}", # Ensure Jinja comments are tokenized correctly in all dialects + } - klass.KEYWORD_TRIE = new_trie( + klass._KEYWORD_TRIE = new_trie( key.upper() for key in ( *klass.KEYWORDS, @@ -456,20 +461,22 @@ class Tokenizer(metaclass=_Tokenizer): STRING_ESCAPES = ["'"] VAR_SINGLE_TOKENS: t.Set[str] = set() + # Autofilled + IDENTIFIERS_CAN_START_WITH_DIGIT: bool = False + _COMMENTS: t.Dict[str, str] = {} _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {} _IDENTIFIERS: t.Dict[str, str] = {} _IDENTIFIER_ESCAPES: t.Set[str] = set() _QUOTES: t.Dict[str, str] = {} _STRING_ESCAPES: t.Set[str] = set() + _KEYWORD_TRIE: t.Dict = {} - KEYWORDS: t.Dict[t.Optional[str], TokenType] = { + KEYWORDS: t.Dict[str, TokenType] = { **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")}, **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")}, - "{{+": TokenType.BLOCK_START, - "{{-": TokenType.BLOCK_START, - "+}}": TokenType.BLOCK_END, - "-}}": TokenType.BLOCK_END, + **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")}, + **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")}, "/*+": TokenType.HINT, "==": TokenType.EQ, "::": TokenType.DCOLON, @@ -594,6 +601,7 @@ class Tokenizer(metaclass=_Tokenizer): "RECURSIVE": TokenType.RECURSIVE, "REGEXP": TokenType.RLIKE, "REPLACE": TokenType.REPLACE, + "RETURNING": TokenType.RETURNING, "REFERENCES": TokenType.REFERENCES, "RIGHT": TokenType.RIGHT, "RLIKE": TokenType.RLIKE, @@ -732,8 +740,7 @@ class Tokenizer(metaclass=_Tokenizer): NUMERIC_LITERALS: t.Dict[str, str] = {} ENCODE: t.Optional[str] = None - COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")] - KEYWORD_TRIE: t.Dict = {} # autofilled + COMMENTS = ["--", ("/*", "*/")] __slots__ = ( "sql", @@ -748,7 +755,6 @@ class Tokenizer(metaclass=_Tokenizer): "_end", "_peek", "_prev_token_line", - "identifiers_can_start_with_digit", ) def __init__(self) -> None: @@ -894,7 +900,7 @@ class Tokenizer(metaclass=_Tokenizer): 
char = chars prev_space = False skip = False - trie = self.KEYWORD_TRIE + trie = self._KEYWORD_TRIE single_token = char in self.SINGLE_TOKENS while chars: @@ -994,7 +1000,7 @@ class Tokenizer(metaclass=_Tokenizer): self._advance() elif self._peek == "." and not decimal: after = self.peek(1) - if after.isdigit() or not after.strip(): + if after.isdigit() or not after.isalpha(): decimal = True self._advance() else: @@ -1013,13 +1019,13 @@ class Tokenizer(metaclass=_Tokenizer): literal += self._peek.upper() self._advance() - token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal)) + token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal, "")) if token_type: self._add(TokenType.NUMBER, number_text) self._add(TokenType.DCOLON, "::") return self._add(token_type, literal) - elif self.identifiers_can_start_with_digit: # type: ignore + elif self.IDENTIFIERS_CAN_START_WITH_DIGIT: return self._add(TokenType.VAR) self._add(TokenType.NUMBER, number_text) diff --git a/tests/dataframe/unit/test_functions.py b/tests/dataframe/unit/test_functions.py index befa68b..556001c 100644 --- a/tests/dataframe/unit/test_functions.py +++ b/tests/dataframe/unit/test_functions.py @@ -1278,7 +1278,7 @@ class TestFunctions(unittest.TestCase): col = SF.concat(SF.col("cola"), SF.col("colb")) self.assertEqual("CONCAT(cola, colb)", col.sql()) col_single = SF.concat("cola") - self.assertEqual("cola", col_single.sql()) + self.assertEqual("CONCAT(cola)", col_single.sql()) def test_array_position(self): col_str = SF.array_position("cola", SF.col("colb")) diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py index 05ded11..1c8aa51 100644 --- a/tests/dialects/test_bigquery.py +++ b/tests/dialects/test_bigquery.py @@ -6,6 +6,9 @@ class TestBigQuery(Validator): dialect = "bigquery" def test_bigquery(self): + self.validate_identity("DATE(2016, 12, 25)") + self.validate_identity("DATE(CAST('2016-12-25 23:59:59' AS DATETIME))") + self.validate_identity("SELECT foo IN UNNEST(bar) AS bla") self.validate_identity("SELECT * FROM x-0.a") self.validate_identity("SELECT * FROM pivot CROSS JOIN foo") self.validate_identity("SAFE_CAST(x AS STRING)") @@ -27,6 +30,9 @@ class TestBigQuery(Validator): self.validate_identity("SELECT * FROM q UNPIVOT(values FOR quarter IN (b, c))") self.validate_identity("""CREATE TABLE x (a STRUCT>)""") self.validate_identity("""CREATE TABLE x (a STRUCT)""") + self.validate_identity( + "DATE(CAST('2016-12-25 05:30:00+07' AS DATETIME), 'America/Los_Angeles')" + ) self.validate_identity( """CREATE TABLE x (a STRING OPTIONS (description='x')) OPTIONS (table_expiration_days=1)""" ) @@ -37,6 +43,19 @@ class TestBigQuery(Validator): "CREATE TABLE IF NOT EXISTS foo AS SELECT * FROM bla EXCEPT DISTINCT (SELECT * FROM bar) LIMIT 0" ) + self.validate_all("SELECT SPLIT(foo)", write={"bigquery": "SELECT SPLIT(foo, ',')"}) + self.validate_all( + "cast(x as date format 'MM/DD/YYYY')", + write={ + "bigquery": "PARSE_DATE('%m/%d/%Y', x)", + }, + ) + self.validate_all( + "cast(x as time format 'YYYY.MM.DD HH:MI:SSTZH')", + write={ + "bigquery": "PARSE_TIMESTAMP('%Y.%m.%d %I:%M:%S%z', x)", + }, + ) self.validate_all("SELECT 1 AS hash", write={"bigquery": "SELECT 1 AS `hash`"}) self.validate_all('x <> ""', write={"bigquery": "x <> ''"}) self.validate_all('x <> """"""', write={"bigquery": "x <> ''"}) @@ -55,11 +74,12 @@ class TestBigQuery(Validator): "SELECT * FROM `my-project.my-dataset.my-table`", write={"bigquery": "SELECT * FROM `my-project`.`my-dataset`.`my-table`"}, ) + 
self.validate_all("CAST(x AS DATETIME)", read={"": "x::timestamp"}) + self.validate_identity("CAST(x AS TIMESTAMP)") self.validate_all("LEAST(x, y)", read={"sqlite": "MIN(x, y)"}) self.validate_all("CAST(x AS CHAR)", write={"bigquery": "CAST(x AS STRING)"}) self.validate_all("CAST(x AS NCHAR)", write={"bigquery": "CAST(x AS STRING)"}) self.validate_all("CAST(x AS NVARCHAR)", write={"bigquery": "CAST(x AS STRING)"}) - self.validate_all("CAST(x AS TIMESTAMP)", write={"bigquery": "CAST(x AS DATETIME)"}) self.validate_all("CAST(x AS TIMESTAMPTZ)", write={"bigquery": "CAST(x AS TIMESTAMP)"}) self.validate_all("CAST(x AS RECORD)", write={"bigquery": "CAST(x AS STRUCT)"}) self.validate_all( @@ -418,9 +438,11 @@ class TestBigQuery(Validator): self.validate_all( "SELECT REGEXP_EXTRACT(abc, 'pattern(group)') FROM table", write={ + "bigquery": "SELECT REGEXP_EXTRACT(abc, 'pattern(group)') FROM table", "duckdb": "SELECT REGEXP_EXTRACT(abc, 'pattern(group)', 1) FROM table", }, ) + self.validate_identity("REGEXP_EXTRACT(`foo`, 'bar: (.+?)', 1, 1)") self.validate_identity("BEGIN A B C D E F") self.validate_identity("BEGIN TRANSACTION") self.validate_identity("COMMIT TRANSACTION") diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py index f5372d9..7584c67 100644 --- a/tests/dialects/test_clickhouse.py +++ b/tests/dialects/test_clickhouse.py @@ -45,7 +45,21 @@ class TestClickhouse(Validator): self.validate_identity( "CREATE MATERIALIZED VIEW test_view ON CLUSTER cl1 (id UInt8) ENGINE=AggregatingMergeTree() ORDER BY tuple() AS SELECT * FROM test_data" ) + self.validate_identity( + "CREATE MATERIALIZED VIEW test_view ON CLUSTER cl1 (id UInt8) TO table1 AS SELECT * FROM test_data" + ) + self.validate_identity( + "CREATE MATERIALIZED VIEW test_view (id UInt8) TO db.table1 AS SELECT * FROM test_data" + ) + self.validate_all( + "CONCAT(CASE WHEN COALESCE(a, '') IS NULL THEN COALESCE(a, '') ELSE CAST(COALESCE(a, '') AS TEXT) END, CASE WHEN COALESCE(b, '') IS NULL THEN COALESCE(b, '') ELSE CAST(COALESCE(b, '') AS TEXT) END)", + read={"postgres": "CONCAT(a, b)"}, + ) + self.validate_all( + "CONCAT(CASE WHEN a IS NULL THEN a ELSE CAST(a AS TEXT) END, CASE WHEN b IS NULL THEN b ELSE CAST(b AS TEXT) END)", + read={"mysql": "CONCAT(a, b)"}, + ) self.validate_all( r"'Enum8(\'Sunday\' = 0)'", write={"clickhouse": "'Enum8(''Sunday'' = 0)'"} ) diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py index 7e20812..8ffdf07 100644 --- a/tests/dialects/test_dialect.py +++ b/tests/dialects/test_dialect.py @@ -1084,6 +1084,14 @@ class TestDialect(Validator): self.validate_identity("some.column LIKE 'foo' + another.column + 'bar'") self.validate_all("LIKE(x, 'z')", write={"": "'z' LIKE x"}) + self.validate_all( + "CONCAT(a, b, c)", + write={ + "": "CONCAT(a, b, c)", + "redshift": "a || b || c", + "sqlite": "a || b || c", + }, + ) self.validate_all( "x ILIKE '%y'", read={ @@ -1177,10 +1185,21 @@ class TestDialect(Validator): self.validate_all( "CONCAT(a)", write={ - "mysql": "a", + "clickhouse": "a", + "presto": "a", + "trino": "a", "tsql": "a", }, ) + self.validate_all( + "COALESCE(a, '')", + read={ + "drill": "CONCAT(a)", + "duckdb": "CONCAT(a)", + "postgres": "CONCAT(a)", + "tsql": "CONCAT(a)", + }, + ) self.validate_all( "IF(x > 1, 1, 0)", write={ @@ -1276,7 +1295,7 @@ class TestDialect(Validator): def test_limit(self): self.validate_all( "SELECT * FROM data LIMIT 10, 20", - write={"sqlite": "SELECT * FROM data LIMIT 10 OFFSET 20"}, + write={"sqlite": "SELECT * FROM data LIMIT 
20 OFFSET 10"}, ) self.validate_all( "SELECT x FROM y LIMIT 10", diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py index ee15d04..f0caafc 100644 --- a/tests/dialects/test_duckdb.py +++ b/tests/dialects/test_duckdb.py @@ -9,6 +9,20 @@ class TestDuckDB(Validator): self.validate_identity("SELECT CURRENT_DATE") self.validate_identity("SELECT CURRENT_TIMESTAMP") + self.validate_all( + "SELECT CAST('2020-01-01' AS DATE) + INTERVAL (-1) DAY", + read={"mysql": "SELECT DATE '2020-01-01' + INTERVAL -1 DAY"}, + ) + self.validate_all( + "SELECT INTERVAL '1 quarter'", + write={"duckdb": "SELECT (90 * INTERVAL '1' day)"}, + ) + self.validate_all( + "SELECT ((DATE_TRUNC('DAY', CAST(CAST(DATE_TRUNC('DAY', CURRENT_TIMESTAMP) AS DATE) AS TIMESTAMP) + INTERVAL (0 - MOD((DAYOFWEEK(CAST(CAST(DATE_TRUNC('DAY', CURRENT_TIMESTAMP) AS DATE) AS TIMESTAMP)) % 7) - 1 + 7, 7)) day) + (7 * INTERVAL (-5) day))) AS t1", + read={ + "presto": "SELECT ((DATE_ADD('week', -5, DATE_TRUNC('DAY', DATE_ADD('day', (0 - MOD((DAY_OF_WEEK(CAST(CAST(DATE_TRUNC('DAY', NOW()) AS DATE) AS TIMESTAMP)) % 7) - 1 + 7, 7)), CAST(CAST(DATE_TRUNC('DAY', NOW()) AS DATE) AS TIMESTAMP)))))) AS t1", + }, + ) self.validate_all( "EPOCH(x)", read={ @@ -51,7 +65,7 @@ class TestDuckDB(Validator): self.validate_all( "STRPTIME(x, '%y-%-m')", write={ - "bigquery": "PARSE_TIMESTAMP('%y-%m', x)", + "bigquery": "PARSE_TIMESTAMP('%y-%-m', x)", "duckdb": "STRPTIME(x, '%y-%-m')", "presto": "DATE_PARSE(x, '%y-%c')", "hive": "CAST(FROM_UNIXTIME(UNIX_TIMESTAMP(x, 'yy-M')) AS TIMESTAMP)", @@ -70,7 +84,7 @@ class TestDuckDB(Validator): self.validate_all( "STRPTIME(x, '%-m/%-d/%y %-I:%M %p')", write={ - "bigquery": "PARSE_TIMESTAMP('%m/%d/%y %I:%M %p', x)", + "bigquery": "PARSE_TIMESTAMP('%-m/%-d/%y %-I:%M %p', x)", "duckdb": "STRPTIME(x, '%-m/%-d/%y %-I:%M %p')", "presto": "DATE_PARSE(x, '%c/%e/%y %l:%i %p')", "hive": "CAST(FROM_UNIXTIME(UNIX_TIMESTAMP(x, 'M/d/yy h:mm a')) AS TIMESTAMP)", diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py index 4fb6fa5..0b9c8b7 100644 --- a/tests/dialects/test_mysql.py +++ b/tests/dialects/test_mysql.py @@ -45,6 +45,8 @@ class TestMySQL(Validator): ) def test_identity(self): + self.validate_identity("CAST(x AS ENUM('a', 'b'))") + self.validate_identity("CAST(x AS SET('a', 'b'))") self.validate_identity("SELECT CURRENT_TIMESTAMP(6)") self.validate_identity("x ->> '$.name'") self.validate_identity("SELECT CAST(`a`.`b` AS INT) FROM foo") diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py index 12ff699..2c67805 100644 --- a/tests/dialects/test_oracle.py +++ b/tests/dialects/test_oracle.py @@ -26,8 +26,8 @@ class TestOracle(Validator): self.validate_all( "NVL(NULL, 1)", write={ + "": "COALESCE(NULL, 1)", "oracle": "NVL(NULL, 1)", - "": "IFNULL(NULL, 1)", }, ) self.validate_all( diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index 972a8c8..4e57b36 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -1,3 +1,5 @@ +from unittest import mock + from sqlglot import ParseError, exp, parse_one, transpile from tests.dialects.test_dialect import Validator @@ -85,6 +87,39 @@ class TestPostgres(Validator): read="postgres", ) + def test_unnest(self): + self.validate_identity( + "SELECT * FROM UNNEST(ARRAY[1, 2], ARRAY['foo', 'bar', 'baz']) AS x(a, b)" + ) + + self.validate_all( + "SELECT UNNEST(c) FROM t", + write={ + "hive": "SELECT EXPLODE(c) FROM t", + "postgres": "SELECT UNNEST(c) FROM t", + "presto": "SELECT col 
FROM t CROSS JOIN UNNEST(c) AS _u(col)", + }, + ) + self.validate_all( + "SELECT UNNEST(ARRAY[1])", + write={ + "hive": "SELECT EXPLODE(ARRAY(1))", + "postgres": "SELECT UNNEST(ARRAY[1])", + "presto": "SELECT col FROM UNNEST(ARRAY[1]) AS _u(col)", + }, + ) + + @mock.patch("sqlglot.helper.logger") + def test_array_offset(self, mock_logger): + self.validate_all( + "SELECT col[1]", + write={ + "hive": "SELECT col[0]", + "postgres": "SELECT col[1]", + "presto": "SELECT col[1]", + }, + ) + def test_postgres(self): self.validate_identity("CAST(x AS INT4RANGE)") self.validate_identity("CAST(x AS INT4MULTIRANGE)") @@ -540,3 +575,24 @@ class TestPostgres(Validator): "SELECT a, LOGICAL_OR(b) FROM table GROUP BY a", write={"postgres": "SELECT a, BOOL_OR(b) FROM table GROUP BY a"}, ) + + def test_string_concat(self): + self.validate_all( + "CONCAT(a, b)", + write={ + "": "CONCAT(COALESCE(a, ''), COALESCE(b, ''))", + "duckdb": "CONCAT(COALESCE(a, ''), COALESCE(b, ''))", + "postgres": "CONCAT(COALESCE(a, ''), COALESCE(b, ''))", + "presto": "CONCAT(CAST(COALESCE(a, '') AS VARCHAR), CAST(COALESCE(b, '') AS VARCHAR))", + }, + ) + self.validate_all( + "a || b", + write={ + "": "a || b", + "clickhouse": "CONCAT(CAST(a AS TEXT), CAST(b AS TEXT))", + "duckdb": "a || b", + "postgres": "a || b", + "presto": "CONCAT(CAST(a AS VARCHAR), CAST(b AS VARCHAR))", + }, + ) diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py index e3d09ef..4f37be5 100644 --- a/tests/dialects/test_presto.py +++ b/tests/dialects/test_presto.py @@ -440,6 +440,8 @@ class TestPresto(Validator): ) def test_presto(self): + self.validate_identity("SELECT * FROM x OFFSET 1 LIMIT 1") + self.validate_identity("SELECT * FROM x OFFSET 1 FETCH FIRST 1 ROWS ONLY") self.validate_identity("SELECT BOOL_OR(a > 10) FROM asd AS T(a)") self.validate_identity("SELECT * FROM (VALUES (1))") self.validate_identity("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE") diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py index 941f2aa..426e188 100644 --- a/tests/dialects/test_snowflake.py +++ b/tests/dialects/test_snowflake.py @@ -30,6 +30,10 @@ class TestSnowflake(Validator): self.validate_identity("SELECT CONVERT_TIMEZONE('UTC', 'America/Los_Angeles', col)") self.validate_all("CAST(x AS CHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"}) + self.validate_all( + "SELECT * FROM (VALUES (0) foo(bar))", + write={"snowflake": "SELECT * FROM (VALUES (0)) AS foo(bar)"}, + ) self.validate_all("CAST(x AS CHARACTER VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"}) self.validate_all("CAST(x AS NCHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"}) self.validate_all( @@ -274,8 +278,8 @@ class TestSnowflake(Validator): "SELECT TO_TIMESTAMP('2013-04-05 01:02:03')", write={ "bigquery": "SELECT PARSE_TIMESTAMP('%Y-%m-%d %H:%M:%S', '2013-04-05 01:02:03')", - "snowflake": "SELECT TO_TIMESTAMP('2013-04-05 01:02:03', 'yyyy-mm-dd hh24:mi:ss')", - "spark": "SELECT TO_TIMESTAMP('2013-04-05 01:02:03', 'yyyy-MM-d HH:mm:ss')", + "snowflake": "SELECT TO_TIMESTAMP('2013-04-05 01:02:03', 'yyyy-mm-DD hh24:mi:ss')", + "spark": "SELECT TO_TIMESTAMP('2013-04-05 01:02:03', 'yyyy-MM-dd HH:mm:ss')", }, ) self.validate_all( diff --git a/tests/dialects/test_teradata.py b/tests/dialects/test_teradata.py index 9f789d0..6906e47 100644 --- a/tests/dialects/test_teradata.py +++ b/tests/dialects/test_teradata.py @@ -138,11 +138,15 @@ class TestTeradata(Validator): def test_cast(self): self.validate_all( "CAST('1992-01' AS DATE FORMAT 
'YYYY-DD')", + read={ + "bigquery": "CAST('1992-01' AS DATE FORMAT 'YYYY-DD')", + }, write={ "teradata": "CAST('1992-01' AS DATE FORMAT 'YYYY-DD')", - "databricks": "DATE_FORMAT('1992-01', 'YYYY-DD')", - "mysql": "DATE_FORMAT('1992-01', 'YYYY-DD')", - "spark": "DATE_FORMAT('1992-01', 'YYYY-DD')", - "": "TIME_TO_STR('1992-01', 'YYYY-DD')", + "bigquery": "PARSE_DATE('%Y-%d', '1992-01')", + "databricks": "TO_DATE('1992-01', 'yyyy-dd')", + "mysql": "STR_TO_DATE('1992-01', '%Y-%d')", + "spark": "TO_DATE('1992-01', 'yyyy-dd')", + "": "STR_TO_DATE('1992-01', '%Y-%d')", }, ) diff --git a/tests/fixtures/identity.sql b/tests/fixtures/identity.sql index 9fdddf1..e0ea9cb 100644 --- a/tests/fixtures/identity.sql +++ b/tests/fixtures/identity.sql @@ -1,7 +1,11 @@ SUM(1) SUM(CASE WHEN x > 1 THEN 1 ELSE 0 END) / y 1 +(1) +1. +(1.) 1.0 +(1.0) 1E2 1E+2 1E-2 @@ -69,6 +73,8 @@ a.B() a['x'].C() int.x map.x +SELECT update +SELECT x.update SELECT call.x a.b.INT(1.234) INT(x / 100) @@ -155,6 +161,7 @@ DATE(x) = DATE(y) TIMESTAMP(DATE(x)) TIMESTAMP_TRUNC(COALESCE(time_field, CURRENT_TIMESTAMP()), DAY) COUNT(DISTINCT CASE WHEN DATE_TRUNC(DATE(time_field), isoweek) = DATE_TRUNC(DATE(time_field2), isoweek) THEN report_id ELSE NULL END) +COUNT(a, b) x[y - 1] CASE WHEN SUM(x) > 3 THEN 1 END OVER (PARTITION BY x) SUM(ROW() OVER (PARTITION BY x)) @@ -224,6 +231,7 @@ SELECT SUM(x IGNORE NULLS) AS x SELECT COUNT(x RESPECT NULLS) SELECT TRUNCATE(a, b) SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 10) AS x +SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 1, 10) AS x SELECT ARRAY_AGG(STRUCT(x, x AS y) ORDER BY z DESC) AS x SELECT LAST_VALUE(x IGNORE NULLS) OVER y AS x SELECT LAG(x) OVER (ORDER BY y) AS x @@ -601,6 +609,7 @@ CREATE FUNCTION a.b.c() CREATE INDEX abc ON t (a) CREATE INDEX abc ON t (a, b, b) CREATE INDEX abc ON t (a NULLS LAST) +CREATE INDEX pointloc ON points USING GIST(BOX(location, location)) CREATE UNIQUE INDEX abc ON t (a, b, b) CREATE UNIQUE INDEX IF NOT EXISTS my_idx ON tbl (a, b) CREATE SCHEMA x @@ -671,6 +680,7 @@ INSERT INTO x VALUES (1, 'a', 2.0), (1, 'a', 3.0), (X(), y[1], z.x) INSERT INTO y (a, b, c) SELECT a, b, c FROM x INSERT INTO y (SELECT 1) UNION (SELECT 2) INSERT INTO result_table (WITH test AS (SELECT * FROM source_table) SELECT * FROM test) +INSERT INTO "tests_user" ("username", "first_name", "last_name") VALUES ('fiara', 'Fiara', 'Ironhide') RETURNING "tests_user"."id" INSERT OVERWRITE TABLE x IF EXISTS SELECT * FROM y INSERT OVERWRITE TABLE a.b IF EXISTS SELECT * FROM y INSERT OVERWRITE DIRECTORY 'x' SELECT 1 @@ -805,6 +815,7 @@ PRAGMA schema.synchronous = 2 PRAGMA schema.synchronous = FULL PRAGMA schema.memory_limit = '1GB' JSON_OBJECT() +JSON_OBJECT(*) JSON_OBJECT('key1': 1, 'key2': TRUE) JSON_OBJECT('id': '5', 'fld1': 'bla', 'fld2': 'bar') JSON_OBJECT('x': NULL, 'y': 1 NULL ON NULL) @@ -820,3 +831,7 @@ SELECT PERCENTILE_CONT(x, 0.5 RESPECT NULLS) OVER () SELECT PERCENTILE_CONT(x, 0.5 IGNORE NULLS) OVER () WITH my_cte AS (SELECT 'a' AS desc) SELECT desc AS description FROM my_cte WITH my_cte AS (SELECT 'a' AS asc) SELECT asc AS description FROM my_cte +SELECT * FROM case +SELECT * FROM schema.case +SELECT * FROM current_date +SELECT * FROM schema.current_date diff --git a/tests/fixtures/optimizer/isolate_table_selects.sql b/tests/fixtures/optimizer/isolate_table_selects.sql index 43540e8..36f2d8e 100644 --- a/tests/fixtures/optimizer/isolate_table_selects.sql +++ b/tests/fixtures/optimizer/isolate_table_selects.sql @@ -1,5 +1,5 @@ SELECT * FROM x AS 
x, y AS y2; -SELECT * FROM (SELECT * FROM x AS x) AS x, (SELECT * FROM y AS y) AS y2; +SELECT * FROM (SELECT * FROM x AS x) AS x, (SELECT * FROM y AS y2) AS y2; SELECT * FROM x AS x WHERE x = 1; SELECT * FROM x AS x WHERE x = 1; @@ -17,7 +17,7 @@ WITH y AS (SELECT *) SELECT * FROM x AS x; WITH y AS (SELECT *) SELECT * FROM x AS x; WITH y AS (SELECT * FROM y AS y2 CROSS JOIN x AS z2) SELECT * FROM x AS x CROSS JOIN y as y; -WITH y AS (SELECT * FROM (SELECT * FROM y AS y) AS y2 CROSS JOIN (SELECT * FROM x AS x) AS z2) SELECT * FROM (SELECT * FROM x AS x) AS x CROSS JOIN y AS y; +WITH y AS (SELECT * FROM (SELECT * FROM y AS y2) AS y2 CROSS JOIN (SELECT * FROM x AS z2) AS z2) SELECT * FROM (SELECT * FROM x AS x) AS x CROSS JOIN y AS y; SELECT * FROM x AS x CROSS JOIN xx AS y; SELECT * FROM (SELECT * FROM x AS x) AS x CROSS JOIN xx AS y; diff --git a/tests/fixtures/optimizer/optimizer.sql b/tests/fixtures/optimizer/optimizer.sql index e0567d7..0cb1a58 100644 --- a/tests/fixtures/optimizer/optimizer.sql +++ b/tests/fixtures/optimizer/optimizer.sql @@ -101,10 +101,10 @@ SELECT "x"."a" AS "a", SUM("y"."b") AS "sum_b" FROM "x" AS "x" -JOIN "y" AS "y" - ON "x"."b" = "y"."b" LEFT JOIN "_u_0" AS "_u_0" ON "x"."b" = "_u_0"."_u_1" +JOIN "y" AS "y" + ON "x"."b" = "y"."b" WHERE "_u_0"."_col_0" >= 0 AND "x"."a" > 1 GROUP BY @@ -210,10 +210,10 @@ SELECT "n"."b" AS "b", "o"."b" AS "b" FROM "n" -FULL JOIN "o" - ON "n"."a" = "o"."a" JOIN "n" AS "n2" ON "n"."a" = "n2"."a" +FULL JOIN "o" + ON "n"."a" = "o"."a" WHERE "o"."b" > 0; @@ -619,3 +619,30 @@ WITH "foO" AS ( SELECT "foO"."x" AS "x" FROM "foO" AS "foO"; + +# title: lateral subquery +# execute: false +# dialect: postgres +SELECT u.user_id, l.log_date +FROM users u +CROSS JOIN LATERAL ( + SELECT l.log_date + FROM logs l + WHERE l.user_id = u.user_id AND l.log_date <= 100 + ORDER BY l.log_date DESC NULLS LAST + LIMIT 1 +) l; +SELECT + "u"."user_id" AS "user_id", + "l"."log_date" AS "log_date" +FROM "users" AS "u" +CROSS JOIN LATERAL ( + SELECT + "l"."log_date" + FROM "logs" AS "l" + WHERE + "l"."log_date" <= 100 AND "l"."user_id" = "u"."user_id" + ORDER BY + "l"."log_date" DESC NULLS LAST + LIMIT 1 +) AS "l"; diff --git a/tests/fixtures/optimizer/pushdown_predicates.sql b/tests/fixtures/optimizer/pushdown_predicates.sql index 83a353d..79ce353 100644 --- a/tests/fixtures/optimizer/pushdown_predicates.sql +++ b/tests/fixtures/optimizer/pushdown_predicates.sql @@ -25,8 +25,8 @@ SELECT x.a AS a FROM (SELECT x.a FROM x AS x WHERE x.a = 1 AND x.b = 1) AS x JOI SELECT x.a FROM x AS x JOIN (SELECT y.a FROM y AS y) AS y ON y.a = 1 AND x.a = y.a; SELECT x.a FROM x AS x JOIN (SELECT y.a FROM y AS y WHERE y.a = 1) AS y ON x.a = y.a AND TRUE; -SELECT x.a AS a FROM x AS x JOIN (SELECT * FROM y AS y) AS y ON y.a = 1 WHERE x.a = 1 AND x.b = 1 AND y.a = x; -SELECT x.a AS a FROM x AS x JOIN (SELECT * FROM y AS y WHERE y.a = 1) AS y ON y.a = x AND TRUE WHERE x.a = 1 AND x.b = 1 AND TRUE; +SELECT x.a AS a FROM x AS x JOIN (SELECT * FROM y AS y) AS y ON y.a = 1 WHERE x.a = 1 AND x.b = 1 AND y.a = x.a; +SELECT x.a AS a FROM x AS x JOIN (SELECT * FROM y AS y WHERE y.a = 1) AS y ON y.a = x.a AND TRUE WHERE x.a = 1 AND x.b = 1 AND TRUE; SELECT x.a AS a FROM x AS x CROSS JOIN (SELECT * FROM y AS y) AS y WHERE x.a = 1 AND x.b = 1 AND y.a = x.a AND y.a = 1; SELECT x.a AS a FROM x AS x JOIN (SELECT * FROM y AS y WHERE y.a = 1) AS y ON y.a = x.a AND TRUE WHERE x.a = 1 AND x.b = 1 AND TRUE AND TRUE; diff --git a/tests/fixtures/optimizer/qualify_columns.sql 
b/tests/fixtures/optimizer/qualify_columns.sql index 7be2c7f..81c0b5e 100644 --- a/tests/fixtures/optimizer/qualify_columns.sql +++ b/tests/fixtures/optimizer/qualify_columns.sql @@ -296,6 +296,10 @@ SELECT x.b AS b FROM x AS x; SELECT x.b FROM x JOIN y USING (b); SELECT x.b AS b FROM x AS x JOIN y AS y ON x.b = y.b; +# execute: false +WITH cte AS (SELECT a.b.c.d.f.g FROM tbl1) SELECT g FROM (SELECT g FROM tbl2) tbl2 JOIN cte USING(g); +WITH cte AS (SELECT tbl1.a.b.c.d.f.g AS g FROM tbl1 AS tbl1) SELECT COALESCE(tbl2.g, cte.g) AS g FROM (SELECT tbl2.g AS g FROM tbl2 AS tbl2) AS tbl2 JOIN cte ON tbl2.g = cte.g; + SELECT x.b FROM x JOIN y USING (b) JOIN z USING (b); SELECT x.b AS b FROM x AS x JOIN y AS y ON x.b = y.b JOIN z AS z ON x.b = z.b; diff --git a/tests/fixtures/optimizer/tpc-ds/tpc-ds.sql b/tests/fixtures/optimizer/tpc-ds/tpc-ds.sql index a6ee325..7ef7a6d 100644 --- a/tests/fixtures/optimizer/tpc-ds/tpc-ds.sql +++ b/tests/fixtures/optimizer/tpc-ds/tpc-ds.sql @@ -46,12 +46,12 @@ WITH "customer_total_return" AS ( SELECT "customer"."c_customer_id" AS "c_customer_id" FROM "customer_total_return" AS "ctr1" -JOIN "store" AS "store" - ON "store"."s_state" = 'TN' AND "store"."s_store_sk" = "ctr1"."ctr_store_sk" -JOIN "customer" AS "customer" - ON "ctr1"."ctr_customer_sk" = "customer"."c_customer_sk" LEFT JOIN "_u_0" AS "_u_0" ON "ctr1"."ctr_store_sk" = "_u_0"."_u_1" +JOIN "customer" AS "customer" + ON "ctr1"."ctr_customer_sk" = "customer"."c_customer_sk" +JOIN "store" AS "store" + ON "store"."s_state" = 'TN' AND "store"."s_store_sk" = "ctr1"."ctr_store_sk" WHERE "ctr1"."ctr_total_return" > "_u_0"."_col_0" ORDER BY @@ -238,23 +238,23 @@ ORDER BY dt.d_year, brand_id LIMIT 100; SELECT - "date_dim"."d_year" AS "d_year", + "dt"."d_year" AS "d_year", "item"."i_brand_id" AS "brand_id", "item"."i_brand" AS "brand", SUM("store_sales"."ss_ext_discount_amt") AS "sum_agg" -FROM "date_dim" AS "date_dim" +FROM "date_dim" AS "dt" JOIN "store_sales" AS "store_sales" - ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk" + ON "dt"."d_date_sk" = "store_sales"."ss_sold_date_sk" JOIN "item" AS "item" ON "item"."i_manufact_id" = 427 AND "store_sales"."ss_item_sk" = "item"."i_item_sk" WHERE - "date_dim"."d_moy" = 11 + "dt"."d_moy" = 11 GROUP BY - "date_dim"."d_year", + "dt"."d_year", "item"."i_brand", "item"."i_brand_id" ORDER BY - "date_dim"."d_year", + "dt"."d_year", "sum_agg" DESC, "brand_id" LIMIT 100; @@ -567,14 +567,14 @@ SELECT "t_s_secyear"."customer_last_name" AS "customer_last_name", "t_s_secyear"."customer_preferred_cust_flag" AS "customer_preferred_cust_flag" FROM "year_total" AS "t_s_firstyear" -JOIN "year_total" AS "t_s_secyear" - ON "t_s_secyear"."customer_id" = "t_s_firstyear"."customer_id" - AND "t_s_secyear"."dyear" = 2002 - AND "t_s_secyear"."sale_type" = 's' JOIN "year_total" AS "t_c_secyear" ON "t_c_secyear"."dyear" = 2002 AND "t_c_secyear"."sale_type" = 'c' AND "t_s_firstyear"."customer_id" = "t_c_secyear"."customer_id" +JOIN "year_total" AS "t_s_secyear" + ON "t_s_secyear"."customer_id" = "t_s_firstyear"."customer_id" + AND "t_s_secyear"."dyear" = 2002 + AND "t_s_secyear"."sale_type" = 's' JOIN "year_total" AS "t_w_firstyear" ON "t_s_firstyear"."customer_id" = "t_w_firstyear"."customer_id" AND "t_w_firstyear"."dyear" = 2001 @@ -813,10 +813,10 @@ WITH "salesreturns" AS ( SUM("salesreturns"."return_amt") AS "returns1", SUM("salesreturns"."net_loss") AS "profit_loss" FROM "salesreturns_2" AS "salesreturns" - JOIN "date_dim_2" AS "date_dim" - ON "salesreturns"."date_sk" = 
"date_dim"."d_date_sk" JOIN "catalog_page" AS "catalog_page" ON "salesreturns"."page_sk" = "catalog_page"."cp_catalog_page_sk" + JOIN "date_dim_2" AS "date_dim" + ON "salesreturns"."date_sk" = "date_dim"."d_date_sk" GROUP BY "catalog_page"."cp_catalog_page_id" ), "salesreturns_3" AS ( @@ -931,7 +931,7 @@ ORDER BY cnt LIMIT 100; WITH "_u_0" AS ( SELECT DISTINCT - "date_dim"."d_month_seq" AS "_col_0" + "date_dim"."d_month_seq" AS "d_month_seq" FROM "date_dim" AS "date_dim" WHERE "date_dim"."d_moy" = 7 AND "date_dim"."d_year" = 1998 @@ -944,25 +944,25 @@ WITH "_u_0" AS ( "j"."i_category" ) SELECT - "customer_address"."ca_state" AS "state", + "a"."ca_state" AS "state", COUNT(*) AS "cnt" -FROM "customer_address" AS "customer_address" -CROSS JOIN "_u_0" AS "_u_0" -JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_month_seq" = "_u_0"."_col_0" -JOIN "store_sales" AS "store_sales" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" -JOIN "customer" AS "customer" - ON "customer"."c_customer_sk" = "store_sales"."ss_customer_sk" - AND "customer_address"."ca_address_sk" = "customer"."c_current_addr_sk" -JOIN "item" AS "item" - ON "store_sales"."ss_item_sk" = "item"."i_item_sk" +FROM "customer_address" AS "a" +JOIN "customer" AS "c" + ON "a"."ca_address_sk" = "c"."c_current_addr_sk" +JOIN "store_sales" AS "s" + ON "c"."c_customer_sk" = "s"."ss_customer_sk" +JOIN "date_dim" AS "d" + ON "s"."ss_sold_date_sk" = "d"."d_date_sk" +JOIN "item" AS "i" + ON "s"."ss_item_sk" = "i"."i_item_sk" +JOIN "_u_0" AS "_u_0" + ON "d"."d_month_seq" = "_u_0"."d_month_seq" LEFT JOIN "_u_1" AS "_u_1" - ON "_u_1"."_u_2" = "item"."i_category" + ON "_u_1"."_u_2" = "i"."i_category" WHERE - "item"."i_current_price" > 1.2 * "_u_1"."_col_0" + "i"."i_current_price" > 1.2 * "_u_1"."_col_0" GROUP BY - "customer_address"."ca_state" + "a"."ca_state" HAVING COUNT(*) >= 10 ORDER BY @@ -1369,6 +1369,36 @@ WITH "_u_0" AS ( FROM "store_sales" AS "store_sales" WHERE "store_sales"."ss_quantity" <= 20 AND "store_sales"."ss_quantity" >= 1 +), "_u_10" AS ( + SELECT + AVG("store_sales"."ss_ext_list_price") AS "_col_0" + FROM "store_sales" AS "store_sales" + WHERE + "store_sales"."ss_quantity" <= 80 AND "store_sales"."ss_quantity" >= 61 +), "_u_11" AS ( + SELECT + AVG("store_sales"."ss_net_profit") AS "_col_0" + FROM "store_sales" AS "store_sales" + WHERE + "store_sales"."ss_quantity" <= 80 AND "store_sales"."ss_quantity" >= 61 +), "_u_12" AS ( + SELECT + COUNT(*) AS "_col_0" + FROM "store_sales" AS "store_sales" + WHERE + "store_sales"."ss_quantity" <= 100 AND "store_sales"."ss_quantity" >= 81 +), "_u_13" AS ( + SELECT + AVG("store_sales"."ss_ext_list_price") AS "_col_0" + FROM "store_sales" AS "store_sales" + WHERE + "store_sales"."ss_quantity" <= 100 AND "store_sales"."ss_quantity" >= 81 +), "_u_14" AS ( + SELECT + AVG("store_sales"."ss_net_profit") AS "_col_0" + FROM "store_sales" AS "store_sales" + WHERE + "store_sales"."ss_quantity" <= 100 AND "store_sales"."ss_quantity" >= 81 ), "_u_2" AS ( SELECT AVG("store_sales"."ss_net_profit") AS "_col_0" @@ -1417,36 +1447,6 @@ WITH "_u_0" AS ( FROM "store_sales" AS "store_sales" WHERE "store_sales"."ss_quantity" <= 80 AND "store_sales"."ss_quantity" >= 61 -), "_u_10" AS ( - SELECT - AVG("store_sales"."ss_ext_list_price") AS "_col_0" - FROM "store_sales" AS "store_sales" - WHERE - "store_sales"."ss_quantity" <= 80 AND "store_sales"."ss_quantity" >= 61 -), "_u_11" AS ( - SELECT - AVG("store_sales"."ss_net_profit") AS "_col_0" - FROM "store_sales" AS "store_sales" - WHERE - 
"store_sales"."ss_quantity" <= 80 AND "store_sales"."ss_quantity" >= 61 -), "_u_12" AS ( - SELECT - COUNT(*) AS "_col_0" - FROM "store_sales" AS "store_sales" - WHERE - "store_sales"."ss_quantity" <= 100 AND "store_sales"."ss_quantity" >= 81 -), "_u_13" AS ( - SELECT - AVG("store_sales"."ss_ext_list_price") AS "_col_0" - FROM "store_sales" AS "store_sales" - WHERE - "store_sales"."ss_quantity" <= 100 AND "store_sales"."ss_quantity" >= 81 -), "_u_14" AS ( - SELECT - AVG("store_sales"."ss_net_profit") AS "_col_0" - FROM "store_sales" AS "store_sales" - WHERE - "store_sales"."ss_quantity" <= 100 AND "store_sales"."ss_quantity" >= 81 ) SELECT CASE WHEN "_u_0"."_col_0" > 3672 THEN "_u_1"."_col_0" ELSE "_u_2"."_col_0" END AS "bucket1", @@ -1457,6 +1457,11 @@ SELECT FROM "reason" AS "reason" CROSS JOIN "_u_0" AS "_u_0" CROSS JOIN "_u_1" AS "_u_1" +CROSS JOIN "_u_10" AS "_u_10" +CROSS JOIN "_u_11" AS "_u_11" +CROSS JOIN "_u_12" AS "_u_12" +CROSS JOIN "_u_13" AS "_u_13" +CROSS JOIN "_u_14" AS "_u_14" CROSS JOIN "_u_2" AS "_u_2" CROSS JOIN "_u_3" AS "_u_3" CROSS JOIN "_u_4" AS "_u_4" @@ -1465,11 +1470,6 @@ CROSS JOIN "_u_6" AS "_u_6" CROSS JOIN "_u_7" AS "_u_7" CROSS JOIN "_u_8" AS "_u_8" CROSS JOIN "_u_9" AS "_u_9" -CROSS JOIN "_u_10" AS "_u_10" -CROSS JOIN "_u_11" AS "_u_11" -CROSS JOIN "_u_12" AS "_u_12" -CROSS JOIN "_u_13" AS "_u_13" -CROSS JOIN "_u_14" AS "_u_14" WHERE "reason"."r_reason_sk" = 1; @@ -1610,18 +1610,18 @@ SELECT COUNT(*) AS "cnt5", "customer_demographics"."cd_dep_college_count" AS "cd_dep_college_count", COUNT(*) AS "cnt6" -FROM "customer" AS "customer" -JOIN "customer_address" AS "customer_address" - ON "customer"."c_current_addr_sk" = "customer_address"."ca_address_sk" - AND "customer_address"."ca_county" IN ('Lycoming County', 'Sheridan County', 'Kandiyohi County', 'Pike County', 'Greene County') -JOIN "customer_demographics" AS "customer_demographics" - ON "customer_demographics"."cd_demo_sk" = "customer"."c_current_cdemo_sk" +FROM "customer" AS "c" LEFT JOIN "_u_0" AS "_u_0" - ON "customer"."c_customer_sk" = "_u_0"."_u_1" + ON "c"."c_customer_sk" = "_u_0"."_u_1" LEFT JOIN "_u_2" AS "_u_2" - ON "customer"."c_customer_sk" = "_u_2"."_u_3" + ON "c"."c_customer_sk" = "_u_2"."_u_3" LEFT JOIN "_u_4" AS "_u_4" - ON "customer"."c_customer_sk" = "_u_4"."_u_5" + ON "c"."c_customer_sk" = "_u_4"."_u_5" +JOIN "customer_address" AS "ca" + ON "c"."c_current_addr_sk" = "ca"."ca_address_sk" + AND "ca"."ca_county" IN ('Lycoming County', 'Sheridan County', 'Kandiyohi County', 'Pike County', 'Greene County') +JOIN "customer_demographics" AS "customer_demographics" + ON "customer_demographics"."cd_demo_sk" = "c"."c_current_cdemo_sk" WHERE NOT "_u_0"."_u_1" IS NULL AND ( @@ -1835,19 +1835,19 @@ SELECT "t_s_secyear"."customer_last_name" AS "customer_last_name", "t_s_secyear"."customer_birth_country" AS "customer_birth_country" FROM "year_total" AS "t_s_firstyear" -JOIN "year_total" AS "t_s_secyear" - ON "t_s_secyear"."customer_id" = "t_s_firstyear"."customer_id" - AND "t_s_secyear"."dyear" = 2002 - AND "t_s_secyear"."sale_type" = 's' -JOIN "year_total" AS "t_w_secyear" - ON "t_s_firstyear"."customer_id" = "t_w_secyear"."customer_id" - AND "t_w_secyear"."dyear" = 2002 - AND "t_w_secyear"."sale_type" = 'w' JOIN "year_total" AS "t_w_firstyear" ON "t_s_firstyear"."customer_id" = "t_w_firstyear"."customer_id" AND "t_w_firstyear"."dyear" = 2001 AND "t_w_firstyear"."sale_type" = 'w' AND "t_w_firstyear"."year_total" > 0 +JOIN "year_total" AS "t_w_secyear" + ON "t_s_firstyear"."customer_id" = 
"t_w_secyear"."customer_id" + AND "t_w_secyear"."dyear" = 2002 + AND "t_w_secyear"."sale_type" = 'w' +JOIN "year_total" AS "t_s_secyear" + ON "t_s_secyear"."customer_id" = "t_s_firstyear"."customer_id" + AND "t_s_secyear"."dyear" = 2002 + AND "t_s_secyear"."sale_type" = 's' AND CASE WHEN "t_w_firstyear"."year_total" > 0 THEN "t_w_secyear"."year_total" / "t_w_firstyear"."year_total" @@ -1909,13 +1909,13 @@ SELECT SUM("web_sales"."ws_ext_sales_price") AS "itemrevenue", SUM("web_sales"."ws_ext_sales_price") * 100 / SUM(SUM("web_sales"."ws_ext_sales_price")) OVER (PARTITION BY "item"."i_class") AS "revenueratio" FROM "web_sales" AS "web_sales" -JOIN "item" AS "item" - ON "item"."i_category" IN ('Home', 'Men', 'Women') - AND "web_sales"."ws_item_sk" = "item"."i_item_sk" JOIN "date_dim" AS "date_dim" ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" AND CAST("date_dim"."d_date" AS DATE) <= CAST('2000-06-10' AS DATE) AND CAST("date_dim"."d_date" AS DATE) >= CAST('2000-05-11' AS DATE) +JOIN "item" AS "item" + ON "item"."i_category" IN ('Home', 'Men', 'Women') + AND "web_sales"."ws_item_sk" = "item"."i_item_sk" GROUP BY "item"."i_item_id", "item"."i_item_desc", @@ -1982,22 +1982,7 @@ SELECT AVG("store_sales"."ss_ext_wholesale_cost") AS "_col_2", SUM("store_sales"."ss_ext_wholesale_cost") AS "_col_3" FROM "store_sales" AS "store_sales" -JOIN "store" AS "store" - ON "store"."s_store_sk" = "store_sales"."ss_store_sk" -CROSS JOIN "household_demographics" AS "household_demographics" -JOIN "customer_demographics" AS "customer_demographics" - ON "customer_demographics"."cd_demo_sk" = "store_sales"."ss_cdemo_sk" - AND "customer_demographics"."cd_education_status" = 'Advanced Degree' - AND "customer_demographics"."cd_education_status" = 'Primary' - AND "customer_demographics"."cd_education_status" = 'Secondary' - AND "customer_demographics"."cd_marital_status" = 'D' - AND "customer_demographics"."cd_marital_status" = 'M' - AND "customer_demographics"."cd_marital_status" = 'U' - AND "household_demographics"."hd_dep_count" = 1 - AND "household_demographics"."hd_dep_count" = 3 - AND "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" - AND "store_sales"."ss_sales_price" <= 100.00 - AND "store_sales"."ss_sales_price" >= 150.00 +CROSS JOIN "customer_demographics" AS "customer_demographics" JOIN "customer_address" AS "customer_address" ON ( "customer_address"."ca_country" = 'United States' @@ -2022,7 +2007,22 @@ JOIN "customer_address" AS "customer_address" ) JOIN "date_dim" AS "date_dim" ON "date_dim"."d_year" = 2001 - AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk"; + AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" +JOIN "household_demographics" AS "household_demographics" + ON "customer_demographics"."cd_demo_sk" = "store_sales"."ss_cdemo_sk" + AND "customer_demographics"."cd_education_status" = 'Advanced Degree' + AND "customer_demographics"."cd_education_status" = 'Primary' + AND "customer_demographics"."cd_education_status" = 'Secondary' + AND "customer_demographics"."cd_marital_status" = 'D' + AND "customer_demographics"."cd_marital_status" = 'M' + AND "customer_demographics"."cd_marital_status" = 'U' + AND "household_demographics"."hd_dep_count" = 1 + AND "household_demographics"."hd_dep_count" = 3 + AND "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" + AND "store_sales"."ss_sales_price" <= 100.00 + AND "store_sales"."ss_sales_price" >= 150.00 +JOIN "store" AS "store" + ON "store"."s_store_sk" = "store_sales"."ss_store_sk"; 
-------------------------------------- -- TPC-DS 14 @@ -2165,69 +2165,75 @@ WITH "item_2" AS ( "item"."i_class_id" AS "i_class_id", "item"."i_category_id" AS "i_category_id" FROM "item" AS "item" -), "d1" AS ( - SELECT - "date_dim"."d_date_sk" AS "d_date_sk", - "date_dim"."d_year" AS "d_year" - FROM "date_dim" AS "date_dim" - WHERE - "date_dim"."d_year" <= 2001 AND "date_dim"."d_year" >= 1999 ), "cte_4" AS ( SELECT "ics"."i_brand_id" AS "i_brand_id", "ics"."i_class_id" AS "i_class_id", "ics"."i_category_id" AS "i_category_id" FROM "catalog_sales" AS "catalog_sales" - JOIN "item_2" AS "ics" - ON "catalog_sales"."cs_item_sk" = "ics"."i_item_sk" - JOIN "d1" AS "d2" + JOIN "date_dim" AS "d2" ON "catalog_sales"."cs_sold_date_sk" = "d2"."d_date_sk" + AND "d2"."d_year" <= 2001 + AND "d2"."d_year" >= 1999 + JOIN "item" AS "ics" + ON "catalog_sales"."cs_item_sk" = "ics"."i_item_sk" INTERSECT SELECT "iws"."i_brand_id" AS "i_brand_id", "iws"."i_class_id" AS "i_class_id", "iws"."i_category_id" AS "i_category_id" FROM "web_sales" AS "web_sales" - JOIN "item_2" AS "iws" + JOIN "date_dim" AS "d3" + ON "d3"."d_year" <= 2001 + AND "d3"."d_year" >= 1999 + AND "web_sales"."ws_sold_date_sk" = "d3"."d_date_sk" + JOIN "item" AS "iws" ON "web_sales"."ws_item_sk" = "iws"."i_item_sk" - JOIN "d1" AS "d3" - ON "web_sales"."ws_sold_date_sk" = "d3"."d_date_sk" ), "_q_0" AS ( SELECT "iss"."i_brand_id" AS "brand_id", "iss"."i_class_id" AS "class_id", "iss"."i_category_id" AS "category_id" FROM "store_sales" AS "store_sales" - JOIN "item_2" AS "iss" + JOIN "date_dim" AS "d1" + ON "d1"."d_year" <= 2001 + AND "d1"."d_year" >= 1999 + AND "store_sales"."ss_sold_date_sk" = "d1"."d_date_sk" + JOIN "item" AS "iss" ON "store_sales"."ss_item_sk" = "iss"."i_item_sk" - JOIN "d1" AS "d1" - ON "store_sales"."ss_sold_date_sk" = "d1"."d_date_sk" INTERSECT SELECT "cte_4"."i_brand_id" AS "i_brand_id", "cte_4"."i_class_id" AS "i_class_id", "cte_4"."i_category_id" AS "i_category_id" FROM "cte_4" AS "cte_4" +), "date_dim_2" AS ( + SELECT + "date_dim"."d_date_sk" AS "d_date_sk", + "date_dim"."d_year" AS "d_year" + FROM "date_dim" AS "date_dim" + WHERE + "date_dim"."d_year" <= 2001 AND "date_dim"."d_year" >= 1999 ), "cte_8" AS ( SELECT "catalog_sales"."cs_quantity" AS "quantity", "catalog_sales"."cs_list_price" AS "list_price" FROM "catalog_sales" AS "catalog_sales" - JOIN "d1" AS "date_dim" + JOIN "date_dim_2" AS "date_dim" ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" UNION ALL SELECT "web_sales"."ws_quantity" AS "quantity", "web_sales"."ws_list_price" AS "list_price" FROM "web_sales" AS "web_sales" - JOIN "d1" AS "date_dim" + JOIN "date_dim_2" AS "date_dim" ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" ), "x" AS ( SELECT "store_sales"."ss_quantity" AS "quantity", "store_sales"."ss_list_price" AS "list_price" FROM "store_sales" AS "store_sales" - JOIN "d1" AS "date_dim" + JOIN "date_dim_2" AS "date_dim" ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" UNION ALL SELECT @@ -2238,14 +2244,10 @@ WITH "item_2" AS ( SELECT AVG("x"."quantity" * "x"."list_price") AS "average_sales" FROM "x" AS "x" -), "date_dim_2" AS ( +), "_u_1" AS ( SELECT - "date_dim"."d_date_sk" AS "d_date_sk", - "date_dim"."d_year" AS "d_year", - "date_dim"."d_moy" AS "d_moy" - FROM "date_dim" AS "date_dim" - WHERE - "date_dim"."d_moy" = 11 AND "date_dim"."d_year" = 2001 + "avg_sales"."average_sales" AS "average_sales" + FROM "avg_sales" ), "_u_0" AS ( SELECT "item"."i_item_sk" AS "ss_item_sk" @@ -2256,10 +2258,14 @@ WITH "item_2" AS ( 
AND "item"."i_class_id" = "_q_0"."class_id" GROUP BY "item"."i_item_sk" -), "_u_1" AS ( +), "date_dim_3" AS ( SELECT - "avg_sales"."average_sales" AS "average_sales" - FROM "avg_sales" + "date_dim"."d_date_sk" AS "d_date_sk", + "date_dim"."d_year" AS "d_year", + "date_dim"."d_moy" AS "d_moy" + FROM "date_dim" AS "date_dim" + WHERE + "date_dim"."d_moy" = 11 AND "date_dim"."d_year" = 2001 ), "cte_9" AS ( SELECT 'store' AS "channel", @@ -2269,13 +2275,13 @@ WITH "item_2" AS ( SUM("store_sales"."ss_quantity" * "store_sales"."ss_list_price") AS "sales", COUNT(*) AS "number_sales" FROM "store_sales" AS "store_sales" - JOIN "item_2" AS "item" - ON "store_sales"."ss_item_sk" = "item"."i_item_sk" - JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" + CROSS JOIN "_u_1" AS "_u_1" LEFT JOIN "_u_0" AS "_u_0" ON "store_sales"."ss_item_sk" = "_u_0"."ss_item_sk" - CROSS JOIN "_u_1" AS "_u_1" + JOIN "date_dim_3" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "store_sales"."ss_item_sk" = "item"."i_item_sk" WHERE NOT "_u_0"."ss_item_sk" IS NULL GROUP BY @@ -2293,13 +2299,13 @@ WITH "item_2" AS ( SUM("catalog_sales"."cs_quantity" * "catalog_sales"."cs_list_price") AS "sales", COUNT(*) AS "number_sales" FROM "catalog_sales" AS "catalog_sales" - JOIN "item_2" AS "item" - ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" - JOIN "date_dim_2" AS "date_dim" - ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" + CROSS JOIN "_u_1" AS "_u_3" LEFT JOIN "_u_0" AS "_u_2" ON "catalog_sales"."cs_item_sk" = "_u_2"."ss_item_sk" - CROSS JOIN "_u_1" AS "_u_3" + JOIN "date_dim_3" AS "date_dim" + ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" WHERE NOT "_u_2"."ss_item_sk" IS NULL GROUP BY @@ -2317,13 +2323,13 @@ WITH "item_2" AS ( SUM("web_sales"."ws_quantity" * "web_sales"."ws_list_price") AS "sales", COUNT(*) AS "number_sales" FROM "web_sales" AS "web_sales" - JOIN "item_2" AS "item" - ON "web_sales"."ws_item_sk" = "item"."i_item_sk" - JOIN "date_dim_2" AS "date_dim" - ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" + CROSS JOIN "_u_1" AS "_u_5" LEFT JOIN "_u_0" AS "_u_4" ON "web_sales"."ws_item_sk" = "_u_4"."ss_item_sk" - CROSS JOIN "_u_1" AS "_u_5" + JOIN "date_dim_3" AS "date_dim" + ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "web_sales"."ws_item_sk" = "item"."i_item_sk" WHERE NOT "_u_4"."ss_item_sk" IS NULL GROUP BY @@ -2421,13 +2427,13 @@ JOIN "customer_address" AS "customer_address" ON "catalog_sales"."cs_sales_price" > 500 OR "customer_address"."ca_state" IN ('CA', 'WA', 'GA') OR SUBSTR("customer_address"."ca_zip", 1, 5) IN ('85669', '86197', '88274', '83405', '86475', '85392', '85460', '80348', '81792') -JOIN "customer" AS "customer" - ON "catalog_sales"."cs_bill_customer_sk" = "customer"."c_customer_sk" - AND "customer"."c_current_addr_sk" = "customer_address"."ca_address_sk" JOIN "date_dim" AS "date_dim" ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" AND "date_dim"."d_qoy" = 1 AND "date_dim"."d_year" = 1998 +JOIN "customer" AS "customer" + ON "catalog_sales"."cs_bill_customer_sk" = "customer"."c_customer_sk" + AND "customer"."c_current_addr_sk" = "customer_address"."ca_address_sk" GROUP BY "customer_address"."ca_zip" ORDER BY @@ -2484,32 +2490,32 @@ WITH "_u_0" AS ( "cr1"."cr_order_number" ) SELECT - COUNT(DISTINCT 
"catalog_sales"."cs_order_number") AS "order count", - SUM("catalog_sales"."cs_ext_ship_cost") AS "total shipping cost", - SUM("catalog_sales"."cs_net_profit") AS "total net profit" -FROM "catalog_sales" AS "catalog_sales" + COUNT(DISTINCT "cs1"."cs_order_number") AS "order count", + SUM("cs1"."cs_ext_ship_cost") AS "total shipping cost", + SUM("cs1"."cs_net_profit") AS "total net profit" +FROM "catalog_sales" AS "cs1" +LEFT JOIN "_u_0" AS "_u_0" + ON "cs1"."cs_order_number" = "_u_0"."_u_1" +LEFT JOIN "_u_3" AS "_u_3" + ON "cs1"."cs_order_number" = "_u_3"."_u_4" +JOIN "call_center" AS "call_center" + ON "call_center"."cc_county" IN ('Williamson County', 'Williamson County', 'Williamson County', 'Williamson County', 'Williamson County') + AND "cs1"."cs_call_center_sk" = "call_center"."cc_call_center_sk" +JOIN "customer_address" AS "customer_address" + ON "cs1"."cs_ship_addr_sk" = "customer_address"."ca_address_sk" + AND "customer_address"."ca_state" = 'IA' JOIN "date_dim" AS "date_dim" - ON "catalog_sales"."cs_ship_date_sk" = "date_dim"."d_date_sk" + ON "cs1"."cs_ship_date_sk" = "date_dim"."d_date_sk" AND "date_dim"."d_date" >= '2002-3-01' AND CAST("date_dim"."d_date" AS DATE) <= ( CAST('2002-3-01' AS DATE) + INTERVAL '60' day ) -JOIN "customer_address" AS "customer_address" - ON "catalog_sales"."cs_ship_addr_sk" = "customer_address"."ca_address_sk" - AND "customer_address"."ca_state" = 'IA' -JOIN "call_center" AS "call_center" - ON "call_center"."cc_county" IN ('Williamson County', 'Williamson County', 'Williamson County', 'Williamson County', 'Williamson County') - AND "catalog_sales"."cs_call_center_sk" = "call_center"."cc_call_center_sk" -LEFT JOIN "_u_0" AS "_u_0" - ON "catalog_sales"."cs_order_number" = "_u_0"."_u_1" -LEFT JOIN "_u_3" AS "_u_3" - ON "catalog_sales"."cs_order_number" = "_u_3"."_u_4" WHERE "_u_3"."_u_4" IS NULL - AND ARRAY_ANY("_u_0"."_u_2", "_x" -> "catalog_sales"."cs_warehouse_sk" <> "_x") + AND ARRAY_ANY("_u_0"."_u_2", "_x" -> "cs1"."cs_warehouse_sk" <> "_x") AND NOT "_u_0"."_u_1" IS NULL ORDER BY - COUNT(DISTINCT "catalog_sales"."cs_order_number") + COUNT(DISTINCT "cs1"."cs_order_number") LIMIT 100; -------------------------------------- @@ -2570,14 +2576,6 @@ ORDER BY i_item_id, i_item_desc, s_state LIMIT 100; -WITH "d3" AS ( - SELECT - "date_dim"."d_date_sk" AS "d_date_sk", - "date_dim"."d_quarter_name" AS "d_quarter_name" - FROM "date_dim" AS "date_dim" - WHERE - "date_dim"."d_quarter_name" IN ('1999Q1', '1999Q2', '1999Q3') -) SELECT "item"."i_item_id" AS "i_item_id", "item"."i_item_desc" AS "i_item_desc", @@ -2595,24 +2593,26 @@ SELECT STDDEV_SAMP("catalog_sales"."cs_quantity") / AVG("catalog_sales"."cs_quantity") AS "catalog_sales_quantitystdev", STDDEV_SAMP("catalog_sales"."cs_quantity") / AVG("catalog_sales"."cs_quantity") AS "catalog_sales_quantitycov" FROM "store_sales" AS "store_sales" -CROSS JOIN "d3" AS "d3" -JOIN "catalog_sales" AS "catalog_sales" - ON "catalog_sales"."cs_sold_date_sk" = "d3"."d_date_sk" +JOIN "date_dim" AS "d1" + ON "d1"."d_date_sk" = "store_sales"."ss_sold_date_sk" + AND "d1"."d_quarter_name" = '1999Q1' +JOIN "item" AS "item" + ON "item"."i_item_sk" = "store_sales"."ss_item_sk" +JOIN "store" AS "store" + ON "store"."s_store_sk" = "store_sales"."ss_store_sk" JOIN "store_returns" AS "store_returns" - ON "store_returns"."sr_customer_sk" = "catalog_sales"."cs_bill_customer_sk" - AND "store_returns"."sr_item_sk" = "catalog_sales"."cs_item_sk" - AND "store_sales"."ss_customer_sk" = "store_returns"."sr_customer_sk" + ON 
"store_sales"."ss_customer_sk" = "store_returns"."sr_customer_sk" AND "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" AND "store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" -JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk" - AND "date_dim"."d_quarter_name" = '1999Q1' -JOIN "d3" AS "d2" - ON "store_returns"."sr_returned_date_sk" = "d2"."d_date_sk" -JOIN "store" AS "store" - ON "store"."s_store_sk" = "store_sales"."ss_store_sk" -JOIN "item" AS "item" - ON "item"."i_item_sk" = "store_sales"."ss_item_sk" +JOIN "catalog_sales" AS "catalog_sales" + ON "store_returns"."sr_customer_sk" = "catalog_sales"."cs_bill_customer_sk" + AND "store_returns"."sr_item_sk" = "catalog_sales"."cs_item_sk" +JOIN "date_dim" AS "d2" + ON "d2"."d_quarter_name" IN ('1999Q1', '1999Q2', '1999Q3') + AND "store_returns"."sr_returned_date_sk" = "d2"."d_date_sk" +JOIN "date_dim" AS "d3" + ON "catalog_sales"."cs_sold_date_sk" = "d3"."d_date_sk" + AND "d3"."d_quarter_name" IN ('1999Q1', '1999Q2', '1999Q3') GROUP BY "item"."i_item_id", "item"."i_item_desc", @@ -2674,25 +2674,25 @@ SELECT AVG(CAST("catalog_sales"."cs_sales_price" AS DECIMAL(12, 2))) AS "agg4", AVG(CAST("catalog_sales"."cs_net_profit" AS DECIMAL(12, 2))) AS "agg5", AVG(CAST("customer"."c_birth_year" AS DECIMAL(12, 2))) AS "agg6", - AVG(CAST("customer_demographics"."cd_dep_count" AS DECIMAL(12, 2))) AS "agg7" + AVG(CAST("cd1"."cd_dep_count" AS DECIMAL(12, 2))) AS "agg7" FROM "catalog_sales" AS "catalog_sales" -JOIN "customer_demographics" AS "customer_demographics" - ON "catalog_sales"."cs_bill_cdemo_sk" = "customer_demographics"."cd_demo_sk" - AND "customer_demographics"."cd_education_status" = 'Secondary' - AND "customer_demographics"."cd_gender" = 'F' +JOIN "customer_demographics" AS "cd1" + ON "catalog_sales"."cs_bill_cdemo_sk" = "cd1"."cd_demo_sk" + AND "cd1"."cd_education_status" = 'Secondary' + AND "cd1"."cd_gender" = 'F' JOIN "customer" AS "customer" ON "catalog_sales"."cs_bill_customer_sk" = "customer"."c_customer_sk" AND "customer"."c_birth_month" IN (8, 4, 2, 5, 11, 9) -JOIN "customer_demographics" AS "customer_demographics_2" - ON "customer"."c_current_cdemo_sk" = "customer_demographics_2"."cd_demo_sk" -JOIN "customer_address" AS "customer_address" - ON "customer"."c_current_addr_sk" = "customer_address"."ca_address_sk" - AND "customer_address"."ca_state" IN ('KS', 'IA', 'AL', 'UT', 'VA', 'NC', 'TX') JOIN "date_dim" AS "date_dim" ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" AND "date_dim"."d_year" = 2001 JOIN "item" AS "item" ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" +JOIN "customer_demographics" AS "cd2" + ON "customer"."c_current_cdemo_sk" = "cd2"."cd_demo_sk" +JOIN "customer_address" AS "customer_address" + ON "customer"."c_current_addr_sk" = "customer_address"."ca_address_sk" + AND "customer_address"."ca_state" IN ('KS', 'IA', 'AL', 'UT', 'VA', 'NC', 'TX') GROUP BY ROLLUP ( "item"."i_item_id", @@ -2707,6 +2707,72 @@ ORDER BY "i_item_id" LIMIT 100; +-------------------------------------- +-- TPC-DS 19 +-------------------------------------- +SELECT i_brand_id brand_id, + i_brand brand, + i_manufact_id, + i_manufact, + Sum(ss_ext_sales_price) ext_price +FROM date_dim, + store_sales, + item, + customer, + customer_address, + store +WHERE d_date_sk = ss_sold_date_sk + AND ss_item_sk = i_item_sk + AND i_manager_id = 38 + AND d_moy = 12 + AND d_year = 1998 + AND ss_customer_sk = c_customer_sk + AND c_current_addr_sk = ca_address_sk + AND Substr(ca_zip, 1, 
5) <> Substr(s_zip, 1, 5) + AND ss_store_sk = s_store_sk +GROUP BY i_brand, + i_brand_id, + i_manufact_id, + i_manufact +ORDER BY ext_price DESC, + i_brand, + i_brand_id, + i_manufact_id, + i_manufact +LIMIT 100; +SELECT + "item"."i_brand_id" AS "brand_id", + "item"."i_brand" AS "brand", + "item"."i_manufact_id" AS "i_manufact_id", + "item"."i_manufact" AS "i_manufact", + SUM("store_sales"."ss_ext_sales_price") AS "ext_price" +FROM "date_dim" AS "date_dim" +JOIN "store_sales" AS "store_sales" + ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk" +JOIN "item" AS "item" + ON "item"."i_manager_id" = 38 AND "store_sales"."ss_item_sk" = "item"."i_item_sk" +JOIN "store" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" +JOIN "customer_address" AS "customer_address" + ON SUBSTR("customer_address"."ca_zip", 1, 5) <> SUBSTR("store"."s_zip", 1, 5) +JOIN "customer" AS "customer" + ON "customer"."c_current_addr_sk" = "customer_address"."ca_address_sk" + AND "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" +WHERE + "date_dim"."d_moy" = 12 AND "date_dim"."d_year" = 1998 +GROUP BY + "item"."i_brand", + "item"."i_brand_id", + "item"."i_manufact_id", + "item"."i_manufact" +ORDER BY + "ext_price" DESC, + "item"."i_brand", + "item"."i_brand_id", + "i_manufact_id", + "i_manufact" +LIMIT 100; + -------------------------------------- -- TPC-DS 20 -------------------------------------- @@ -2748,13 +2814,13 @@ SELECT SUM("catalog_sales"."cs_ext_sales_price") AS "itemrevenue", SUM("catalog_sales"."cs_ext_sales_price") * 100 / SUM(SUM("catalog_sales"."cs_ext_sales_price")) OVER (PARTITION BY "item"."i_class") AS "revenueratio" FROM "catalog_sales" AS "catalog_sales" -JOIN "item" AS "item" - ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" - AND "item"."i_category" IN ('Children', 'Women', 'Electronics') JOIN "date_dim" AS "date_dim" ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" AND CAST("date_dim"."d_date" AS DATE) <= CAST('2001-03-05' AS DATE) AND CAST("date_dim"."d_date" AS DATE) >= CAST('2001-02-03' AS DATE) +JOIN "item" AS "item" + ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" + AND "item"."i_category" IN ('Children', 'Women', 'Electronics') GROUP BY "item"."i_item_id", "item"."i_item_desc", @@ -2828,16 +2894,16 @@ WITH "x" AS ( END ) AS "inv_after" FROM "inventory" AS "inventory" - JOIN "warehouse" AS "warehouse" - ON "inventory"."inv_warehouse_sk" = "warehouse"."w_warehouse_sk" - JOIN "item" AS "item" - ON "item"."i_current_price" <= 1.49 - AND "item"."i_current_price" >= 0.99 - AND "item"."i_item_sk" = "inventory"."inv_item_sk" JOIN "date_dim" AS "date_dim" ON "inventory"."inv_date_sk" = "date_dim"."d_date_sk" AND CAST("date_dim"."d_date" AS DATE) <= CAST('2000-06-12' AS DATE) AND CAST("date_dim"."d_date" AS DATE) >= CAST('2000-04-13' AS DATE) + JOIN "item" AS "item" + ON "item"."i_current_price" <= 1.49 + AND "item"."i_current_price" >= 0.99 + AND "item"."i_item_sk" = "inventory"."inv_item_sk" + JOIN "warehouse" AS "warehouse" + ON "inventory"."inv_warehouse_sk" = "warehouse"."w_warehouse_sk" GROUP BY "warehouse"."w_warehouse_name", "item"."i_item_id" @@ -3008,21 +3074,13 @@ WITH "frequent_ss_items" AS ( SELECT "customer"."c_customer_sk" AS "c_customer_sk" FROM "store_sales" AS "store_sales" + CROSS JOIN "max_store_sales" JOIN "customer_2" AS "customer" ON "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" - CROSS JOIN "max_store_sales" GROUP BY "customer"."c_customer_sk" HAVING SUM("store_sales"."ss_quantity" * 
"store_sales"."ss_sales_price") > 0.95 * MAX("max_store_sales"."tpcds_cmax") -), "date_dim_4" AS ( - SELECT - "date_dim"."d_date_sk" AS "d_date_sk", - "date_dim"."d_year" AS "d_year", - "date_dim"."d_moy" AS "d_moy" - FROM "date_dim" AS "date_dim" - WHERE - "date_dim"."d_moy" = 6 AND "date_dim"."d_year" = 1998 ), "_u_1" AS ( SELECT "frequent_ss_items"."item_sk" AS "item_sk" @@ -3035,28 +3093,36 @@ WITH "frequent_ss_items" AS ( FROM "best_ss_customer" GROUP BY "best_ss_customer"."c_customer_sk" +), "date_dim_4" AS ( + SELECT + "date_dim"."d_date_sk" AS "d_date_sk", + "date_dim"."d_year" AS "d_year", + "date_dim"."d_moy" AS "d_moy" + FROM "date_dim" AS "date_dim" + WHERE + "date_dim"."d_moy" = 6 AND "date_dim"."d_year" = 1998 ), "_q_1" AS ( SELECT "catalog_sales"."cs_quantity" * "catalog_sales"."cs_list_price" AS "sales" FROM "catalog_sales" AS "catalog_sales" - JOIN "date_dim_4" AS "date_dim" - ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" LEFT JOIN "_u_1" AS "_u_1" ON "catalog_sales"."cs_item_sk" = "_u_1"."item_sk" LEFT JOIN "_u_2" AS "_u_2" ON "catalog_sales"."cs_bill_customer_sk" = "_u_2"."c_customer_sk" + JOIN "date_dim_4" AS "date_dim" + ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" WHERE NOT "_u_1"."item_sk" IS NULL AND NOT "_u_2"."c_customer_sk" IS NULL UNION ALL SELECT "web_sales"."ws_quantity" * "web_sales"."ws_list_price" AS "sales" FROM "web_sales" AS "web_sales" - JOIN "date_dim_4" AS "date_dim" - ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" LEFT JOIN "_u_1" AS "_u_3" ON "web_sales"."ws_item_sk" = "_u_3"."item_sk" LEFT JOIN "_u_2" AS "_u_4" ON "web_sales"."ws_bill_customer_sk" = "_u_4"."c_customer_sk" + JOIN "date_dim_4" AS "date_dim" + ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" WHERE NOT "_u_3"."item_sk" IS NULL AND NOT "_u_4"."c_customer_sk" IS NULL ) @@ -3123,18 +3189,18 @@ WITH "ssales" AS ( "item"."i_color" AS "i_color", SUM("store_sales"."ss_net_profit") AS "netpaid" FROM "store_sales" AS "store_sales" + JOIN "item" AS "item" + ON "store_sales"."ss_item_sk" = "item"."i_item_sk" + JOIN "store" AS "store" + ON "store"."s_market_id" = 6 AND "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "store_returns" AS "store_returns" ON "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" AND "store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" - JOIN "store" AS "store" - ON "store"."s_market_id" = 6 AND "store_sales"."ss_store_sk" = "store"."s_store_sk" - JOIN "item" AS "item" - ON "store_sales"."ss_item_sk" = "item"."i_item_sk" - JOIN "customer" AS "customer" - ON "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" JOIN "customer_address" AS "customer_address" + ON "store"."s_zip" = "customer_address"."ca_zip" + JOIN "customer" AS "customer" ON "customer"."c_birth_country" = UPPER("customer_address"."ca_country") - AND "store"."s_zip" = "customer_address"."ca_zip" + AND "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" GROUP BY "customer"."c_last_name", "customer"."c_first_name", @@ -3210,15 +3276,6 @@ ORDER BY i_item_id, s_store_id, s_store_name LIMIT 100; -WITH "d3" AS ( - SELECT - "date_dim"."d_date_sk" AS "d_date_sk", - "date_dim"."d_year" AS "d_year", - "date_dim"."d_moy" AS "d_moy" - FROM "date_dim" AS "date_dim" - WHERE - "date_dim"."d_moy" <= 10 AND "date_dim"."d_moy" >= 4 AND "date_dim"."d_year" = 2001 -) SELECT "item"."i_item_id" AS "i_item_id", "item"."i_item_desc" AS "i_item_desc", @@ -3228,25 +3285,31 @@ SELECT MAX("store_returns"."sr_net_loss") AS 
"store_returns_loss", MAX("catalog_sales"."cs_net_profit") AS "catalog_sales_profit" FROM "store_sales" AS "store_sales" -CROSS JOIN "d3" AS "d3" -JOIN "catalog_sales" AS "catalog_sales" - ON "catalog_sales"."cs_sold_date_sk" = "d3"."d_date_sk" +JOIN "date_dim" AS "d1" + ON "d1"."d_date_sk" = "store_sales"."ss_sold_date_sk" + AND "d1"."d_moy" = 4 + AND "d1"."d_year" = 2001 +JOIN "item" AS "item" + ON "item"."i_item_sk" = "store_sales"."ss_item_sk" +JOIN "store" AS "store" + ON "store"."s_store_sk" = "store_sales"."ss_store_sk" JOIN "store_returns" AS "store_returns" - ON "store_returns"."sr_customer_sk" = "catalog_sales"."cs_bill_customer_sk" - AND "store_returns"."sr_item_sk" = "catalog_sales"."cs_item_sk" - AND "store_sales"."ss_customer_sk" = "store_returns"."sr_customer_sk" + ON "store_sales"."ss_customer_sk" = "store_returns"."sr_customer_sk" AND "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" AND "store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" -JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk" - AND "date_dim"."d_moy" = 4 - AND "date_dim"."d_year" = 2001 -JOIN "d3" AS "d2" - ON "store_returns"."sr_returned_date_sk" = "d2"."d_date_sk" -JOIN "store" AS "store" - ON "store"."s_store_sk" = "store_sales"."ss_store_sk" -JOIN "item" AS "item" - ON "item"."i_item_sk" = "store_sales"."ss_item_sk" +JOIN "catalog_sales" AS "catalog_sales" + ON "store_returns"."sr_customer_sk" = "catalog_sales"."cs_bill_customer_sk" + AND "store_returns"."sr_item_sk" = "catalog_sales"."cs_item_sk" +JOIN "date_dim" AS "d2" + ON "d2"."d_moy" <= 10 + AND "d2"."d_moy" >= 4 + AND "d2"."d_year" = 2001 + AND "store_returns"."sr_returned_date_sk" = "d2"."d_date_sk" +JOIN "date_dim" AS "d3" + ON "catalog_sales"."cs_sold_date_sk" = "d3"."d_date_sk" + AND "d3"."d_moy" <= 10 + AND "d3"."d_moy" >= 4 + AND "d3"."d_year" = 2001 GROUP BY "item"."i_item_id", "item"."i_item_desc", @@ -3359,11 +3422,11 @@ JOIN "customer_demographics" AS "customer_demographics" JOIN "date_dim" AS "date_dim" ON "date_dim"."d_year" = 2000 AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" +JOIN "item" AS "item" + ON "store_sales"."ss_item_sk" = "item"."i_item_sk" JOIN "store" AS "store" ON "store"."s_state" IN ('TN', 'TN', 'TN', 'TN', 'TN', 'TN') AND "store_sales"."ss_store_sk" = "store"."s_store_sk" -JOIN "item" AS "item" - ON "store_sales"."ss_item_sk" = "item"."i_item_sk" GROUP BY ROLLUP ( "item"."i_item_id", @@ -3590,29 +3653,29 @@ SELECT AVG("store_returns"."sr_return_quantity") AS "store_returns_quantity", AVG("catalog_sales"."cs_quantity") AS "catalog_sales_quantity" FROM "store_sales" AS "store_sales" -JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_year" IN (1998, 1999, 2000) -JOIN "catalog_sales" AS "catalog_sales" - ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" +JOIN "date_dim" AS "d1" + ON "d1"."d_date_sk" = "store_sales"."ss_sold_date_sk" + AND "d1"."d_moy" = 4 + AND "d1"."d_year" = 1998 +JOIN "item" AS "item" + ON "item"."i_item_sk" = "store_sales"."ss_item_sk" +JOIN "store" AS "store" + ON "store"."s_store_sk" = "store_sales"."ss_store_sk" JOIN "store_returns" AS "store_returns" - ON "store_returns"."sr_customer_sk" = "catalog_sales"."cs_bill_customer_sk" - AND "store_returns"."sr_item_sk" = "catalog_sales"."cs_item_sk" - AND "store_sales"."ss_customer_sk" = "store_returns"."sr_customer_sk" + ON "store_sales"."ss_customer_sk" = "store_returns"."sr_customer_sk" AND "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" AND 
"store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" -JOIN "date_dim" AS "date_dim_2" - ON "date_dim_2"."d_date_sk" = "store_sales"."ss_sold_date_sk" - AND "date_dim_2"."d_moy" = 4 - AND "date_dim_2"."d_year" = 1998 -JOIN "date_dim" AS "date_dim_3" - ON "date_dim_3"."d_moy" <= 7 - AND "date_dim_3"."d_moy" >= 4 - AND "date_dim_3"."d_year" = 1998 - AND "store_returns"."sr_returned_date_sk" = "date_dim_3"."d_date_sk" -JOIN "store" AS "store" - ON "store"."s_store_sk" = "store_sales"."ss_store_sk" -JOIN "item" AS "item" - ON "item"."i_item_sk" = "store_sales"."ss_item_sk" +JOIN "catalog_sales" AS "catalog_sales" + ON "store_returns"."sr_customer_sk" = "catalog_sales"."cs_bill_customer_sk" + AND "store_returns"."sr_item_sk" = "catalog_sales"."cs_item_sk" +JOIN "date_dim" AS "d2" + ON "d2"."d_moy" <= 7 + AND "d2"."d_moy" >= 4 + AND "d2"."d_year" = 1998 + AND "store_returns"."sr_returned_date_sk" = "d2"."d_date_sk" +JOIN "date_dim" AS "d3" + ON "catalog_sales"."cs_sold_date_sk" = "d3"."d_date_sk" + AND "d3"."d_year" IN (1998, 1999, 2000) GROUP BY "item"."i_item_id", "item"."i_item_desc", @@ -3682,11 +3745,11 @@ WITH "customer_total_return" AS ( "customer_address"."ca_state" AS "ctr_state", SUM("web_returns"."wr_return_amt") AS "ctr_total_return" FROM "web_returns" AS "web_returns" + JOIN "customer_address" AS "customer_address" + ON "web_returns"."wr_returning_addr_sk" = "customer_address"."ca_address_sk" JOIN "date_dim" AS "date_dim" ON "date_dim"."d_year" = 2000 AND "web_returns"."wr_returned_date_sk" = "date_dim"."d_date_sk" - JOIN "customer_address" AS "customer_address" - ON "web_returns"."wr_returning_addr_sk" = "customer_address"."ca_address_sk" GROUP BY "web_returns"."wr_returning_customer_sk", "customer_address"."ca_state" @@ -3713,13 +3776,13 @@ SELECT "customer"."c_last_review_date" AS "c_last_review_date", "ctr1"."ctr_total_return" AS "ctr_total_return" FROM "customer_total_return" AS "ctr1" +LEFT JOIN "_u_0" AS "_u_0" + ON "ctr1"."ctr_state" = "_u_0"."_u_1" JOIN "customer" AS "customer" ON "ctr1"."ctr_customer_sk" = "customer"."c_customer_sk" JOIN "customer_address" AS "customer_address" ON "customer_address"."ca_address_sk" = "customer"."c_current_addr_sk" AND "customer_address"."ca_state" = 'IN' -LEFT JOIN "_u_0" AS "_u_0" - ON "ctr1"."ctr_state" = "_u_0"."_u_1" WHERE "ctr1"."ctr_total_return" > "_u_0"."_col_0" ORDER BY @@ -3813,17 +3876,17 @@ WHERE ss1.d_qoy = 1 ELSE NULL END ORDER BY ss1.d_year; -WITH "date_dim_2" AS ( +WITH "customer_address_2" AS ( + SELECT + "customer_address"."ca_address_sk" AS "ca_address_sk", + "customer_address"."ca_county" AS "ca_county" + FROM "customer_address" AS "customer_address" +), "date_dim_2" AS ( SELECT "date_dim"."d_date_sk" AS "d_date_sk", "date_dim"."d_year" AS "d_year", "date_dim"."d_qoy" AS "d_qoy" FROM "date_dim" AS "date_dim" -), "customer_address_2" AS ( - SELECT - "customer_address"."ca_address_sk" AS "ca_address_sk", - "customer_address"."ca_county" AS "ca_county" - FROM "customer_address" AS "customer_address" ), "ss" AS ( SELECT "customer_address"."ca_county" AS "ca_county", @@ -3831,10 +3894,10 @@ WITH "date_dim_2" AS ( "date_dim"."d_year" AS "d_year", SUM("store_sales"."ss_ext_sales_price") AS "store_sales" FROM "store_sales" AS "store_sales" - JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON "store_sales"."ss_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON 
"store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" GROUP BY "customer_address"."ca_county", "date_dim"."d_qoy", @@ -3846,10 +3909,10 @@ WITH "date_dim_2" AS ( "date_dim"."d_year" AS "d_year", SUM("web_sales"."ws_ext_sales_price") AS "web_sales" FROM "web_sales" AS "web_sales" - JOIN "date_dim_2" AS "date_dim" - ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON "web_sales"."ws_bill_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" GROUP BY "customer_address"."ca_county", "date_dim"."d_qoy", @@ -3865,13 +3928,12 @@ SELECT FROM "ss" AS "ss1" JOIN "ss" AS "ss2" ON "ss1"."ca_county" = "ss2"."ca_county" AND "ss2"."d_qoy" = 2 AND "ss2"."d_year" = 2001 -JOIN "ws" AS "ws2" - ON "ws2"."d_qoy" = 2 AND "ws2"."d_year" = 2001 JOIN "ws" AS "ws1" - ON "ss1"."ca_county" = "ws1"."ca_county" - AND "ws1"."ca_county" = "ws2"."ca_county" - AND "ws1"."d_qoy" = 1 - AND "ws1"."d_year" = 2001 + ON "ss1"."ca_county" = "ws1"."ca_county" AND "ws1"."d_qoy" = 1 AND "ws1"."d_year" = 2001 +JOIN "ws" AS "ws2" + ON "ws1"."ca_county" = "ws2"."ca_county" + AND "ws2"."d_qoy" = 2 + AND "ws2"."d_year" = 2001 AND CASE WHEN "ws1"."web_sales" > 0 THEN "ws2"."web_sales" / "ws1"."web_sales" @@ -3951,10 +4013,10 @@ WITH "catalog_sales_2" AS ( SELECT SUM("catalog_sales"."cs_ext_discount_amt") AS "excess discount amount" FROM "catalog_sales_2" AS "catalog_sales" -JOIN "item" AS "item" - ON "item"."i_item_sk" = "catalog_sales"."cs_item_sk" AND "item"."i_manufact_id" = 610 JOIN "date_dim_2" AS "date_dim" ON "date_dim"."d_date_sk" = "catalog_sales"."cs_sold_date_sk" +JOIN "item" AS "item" + ON "item"."i_item_sk" = "catalog_sales"."cs_item_sk" AND "item"."i_manufact_id" = 610 LEFT JOIN "_u_0" AS "_u_0" ON "_u_0"."_u_1" = "item"."i_item_sk" WHERE @@ -4028,7 +4090,14 @@ FROM (SELECT * GROUP BY i_manufact_id ORDER BY total_sales LIMIT 100; -WITH "date_dim_2" AS ( +WITH "customer_address_2" AS ( + SELECT + "customer_address"."ca_address_sk" AS "ca_address_sk", + "customer_address"."ca_gmt_offset" AS "ca_gmt_offset" + FROM "customer_address" AS "customer_address" + WHERE + "customer_address"."ca_gmt_offset" = -5 +), "date_dim_2" AS ( SELECT "date_dim"."d_date_sk" AS "d_date_sk", "date_dim"."d_year" AS "d_year", @@ -4036,13 +4105,6 @@ WITH "date_dim_2" AS ( FROM "date_dim" AS "date_dim" WHERE "date_dim"."d_moy" = 3 AND "date_dim"."d_year" = 1999 -), "customer_address_2" AS ( - SELECT - "customer_address"."ca_address_sk" AS "ca_address_sk", - "customer_address"."ca_gmt_offset" AS "ca_gmt_offset" - FROM "customer_address" AS "customer_address" - WHERE - "customer_address"."ca_gmt_offset" = -5 ), "item_2" AS ( SELECT "item"."i_item_sk" AS "i_item_sk", @@ -4061,10 +4123,10 @@ WITH "date_dim_2" AS ( "item"."i_manufact_id" AS "i_manufact_id", SUM("store_sales"."ss_ext_sales_price") AS "total_sales" FROM "store_sales" AS "store_sales" - JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON "store_sales"."ss_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "item_2" AS "item" ON "store_sales"."ss_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_0" AS "_u_0" @@ -4078,10 +4140,10 @@ WITH "date_dim_2" AS ( "item"."i_manufact_id" AS "i_manufact_id", SUM("catalog_sales"."cs_ext_sales_price") AS "total_sales" FROM 
"catalog_sales" AS "catalog_sales" - JOIN "date_dim_2" AS "date_dim" - ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON "catalog_sales"."cs_bill_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" JOIN "item_2" AS "item" ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_0" AS "_u_1" @@ -4095,10 +4157,10 @@ WITH "date_dim_2" AS ( "item"."i_manufact_id" AS "i_manufact_id", SUM("web_sales"."ws_ext_sales_price") AS "total_sales" FROM "web_sales" AS "web_sales" - JOIN "date_dim_2" AS "date_dim" - ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON "web_sales"."ws_bill_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" JOIN "item_2" AS "item" ON "web_sales"."ws_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_0" AS "_u_2" @@ -4203,9 +4265,6 @@ WITH "dn" AS ( "date_dim"."d_dom" <= 3 AND "date_dim"."d_dom" >= 1 ) ) - JOIN "store" AS "store" - ON "store"."s_county" IN ('Williamson County', 'Williamson County', 'Williamson County', 'Williamson County', 'Williamson County', 'Williamson County', 'Williamson County', 'Williamson County') - AND "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "household_demographics" AS "household_demographics" ON ( "household_demographics"."hd_buy_potential" = '>10000' @@ -4218,6 +4277,9 @@ WITH "dn" AS ( THEN "household_demographics"."hd_dep_count" / "household_demographics"."hd_vehicle_count" ELSE NULL END > 1.2 + JOIN "store" AS "store" + ON "store"."s_county" IN ('Williamson County', 'Williamson County', 'Williamson County', 'Williamson County', 'Williamson County', 'Williamson County', 'Williamson County', 'Williamson County') + AND "store_sales"."ss_store_sk" = "store"."s_store_sk" GROUP BY "store_sales"."ss_ticket_number", "store_sales"."ss_customer_sk" @@ -4359,7 +4421,7 @@ WITH "date_dim_2" AS ( "catalog_sales"."cs_ship_customer_sk" ) SELECT - "customer_address"."ca_state" AS "ca_state", + "ca"."ca_state" AS "ca_state", "customer_demographics"."cd_gender" AS "cd_gender", "customer_demographics"."cd_marital_status" AS "cd_marital_status", "customer_demographics"."cd_dep_count" AS "cd_dep_count", @@ -4377,24 +4439,24 @@ SELECT STDDEV_SAMP("customer_demographics"."cd_dep_college_count") AS "_col_15", AVG("customer_demographics"."cd_dep_college_count") AS "_col_16", MAX("customer_demographics"."cd_dep_college_count") AS "_col_17" -FROM "customer" AS "customer" -JOIN "customer_address" AS "customer_address" - ON "customer"."c_current_addr_sk" = "customer_address"."ca_address_sk" -JOIN "customer_demographics" AS "customer_demographics" - ON "customer_demographics"."cd_demo_sk" = "customer"."c_current_cdemo_sk" +FROM "customer" AS "c" LEFT JOIN "_u_0" AS "_u_0" - ON "customer"."c_customer_sk" = "_u_0"."_u_1" + ON "c"."c_customer_sk" = "_u_0"."_u_1" LEFT JOIN "_u_2" AS "_u_2" - ON "customer"."c_customer_sk" = "_u_2"."_u_3" + ON "c"."c_customer_sk" = "_u_2"."_u_3" LEFT JOIN "_u_4" AS "_u_4" - ON "customer"."c_customer_sk" = "_u_4"."_u_5" + ON "c"."c_customer_sk" = "_u_4"."_u_5" +JOIN "customer_address" AS "ca" + ON "c"."c_current_addr_sk" = "ca"."ca_address_sk" +JOIN "customer_demographics" AS "customer_demographics" + ON "customer_demographics"."cd_demo_sk" = "c"."c_current_cdemo_sk" WHERE NOT "_u_0"."_u_1" IS NULL AND ( NOT "_u_2"."_u_3" IS NULL OR NOT 
"_u_4"."_u_5" IS NULL ) GROUP BY - "customer_address"."ca_state", + "ca"."ca_state", "customer_demographics"."cd_gender", "customer_demographics"."cd_marital_status", "customer_demographics"."cd_dep_count", @@ -4449,9 +4511,8 @@ SELECT GROUPING("item"."i_category") + GROUPING("item"."i_class") AS "lochierarchy", RANK() OVER (PARTITION BY GROUPING("item"."i_category") + GROUPING("item"."i_class"), CASE WHEN GROUPING("item"."i_class") = 0 THEN "item"."i_category" END ORDER BY SUM("store_sales"."ss_net_profit") / SUM("store_sales"."ss_ext_sales_price")) AS "rank_within_parent" FROM "store_sales" AS "store_sales" -JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk" - AND "date_dim"."d_year" = 2000 +JOIN "date_dim" AS "d1" + ON "d1"."d_date_sk" = "store_sales"."ss_sold_date_sk" AND "d1"."d_year" = 2000 JOIN "item" AS "item" ON "item"."i_item_sk" = "store_sales"."ss_item_sk" JOIN "store" AS "store" @@ -4497,6 +4558,8 @@ SELECT "item"."i_item_desc" AS "i_item_desc", "item"."i_current_price" AS "i_current_price" FROM "item" AS "item" +JOIN "catalog_sales" AS "catalog_sales" + ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" JOIN "inventory" AS "inventory" ON "inventory"."inv_item_sk" = "item"."i_item_sk" AND "inventory"."inv_quantity_on_hand" <= 500 @@ -4505,8 +4568,6 @@ JOIN "date_dim" AS "date_dim" ON "date_dim"."d_date_sk" = "inventory"."inv_date_sk" AND CAST("date_dim"."d_date" AS DATE) <= CAST('1999-05-05' AS DATE) AND CAST("date_dim"."d_date" AS DATE) >= CAST('1999-03-06' AS DATE) -JOIN "catalog_sales" AS "catalog_sales" - ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" WHERE "item"."i_current_price" <= 50 AND "item"."i_current_price" >= 20 @@ -4553,7 +4614,13 @@ FROM (SELECT DISTINCT c_last_name, AND web_sales.ws_bill_customer_sk = customer.c_customer_sk AND d_month_seq BETWEEN 1188 AND 1188 + 11) hot_cust LIMIT 100; -WITH "date_dim_2" AS ( +WITH "customer_2" AS ( + SELECT + "customer"."c_customer_sk" AS "c_customer_sk", + "customer"."c_first_name" AS "c_first_name", + "customer"."c_last_name" AS "c_last_name" + FROM "customer" AS "customer" +), "date_dim_2" AS ( SELECT "date_dim"."d_date_sk" AS "d_date_sk", "date_dim"."d_date" AS "d_date", @@ -4561,42 +4628,36 @@ WITH "date_dim_2" AS ( FROM "date_dim" AS "date_dim" WHERE "date_dim"."d_month_seq" <= 1199 AND "date_dim"."d_month_seq" >= 1188 -), "customer_2" AS ( - SELECT - "customer"."c_customer_sk" AS "c_customer_sk", - "customer"."c_first_name" AS "c_first_name", - "customer"."c_last_name" AS "c_last_name" - FROM "customer" AS "customer" ), "cte" AS ( SELECT DISTINCT "customer"."c_last_name" AS "c_last_name", "customer"."c_first_name" AS "c_first_name", "date_dim"."d_date" AS "d_date" FROM "store_sales" AS "store_sales" - JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_2" AS "customer" ON "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" + JOIN "date_dim_2" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" ), "cte_2" AS ( SELECT DISTINCT "customer"."c_last_name" AS "c_last_name", "customer"."c_first_name" AS "c_first_name", "date_dim"."d_date" AS "d_date" FROM "catalog_sales" AS "catalog_sales" - JOIN "date_dim_2" AS "date_dim" - ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_2" AS "customer" ON "catalog_sales"."cs_bill_customer_sk" = "customer"."c_customer_sk" + JOIN "date_dim_2" AS "date_dim" + ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" ), 
"cte_3" AS ( SELECT DISTINCT "customer"."c_last_name" AS "c_last_name", "customer"."c_first_name" AS "c_first_name", "date_dim"."d_date" AS "d_date" FROM "web_sales" AS "web_sales" - JOIN "date_dim_2" AS "date_dim" - ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_2" AS "customer" ON "web_sales"."ws_bill_customer_sk" = "customer"."c_customer_sk" + JOIN "date_dim_2" AS "date_dim" + ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" ), "cte_4" AS ( SELECT "cte_2"."c_last_name" AS "c_last_name", @@ -4695,12 +4756,12 @@ WITH "foo" AS ( STDDEV_SAMP("inventory"."inv_quantity_on_hand") AS "stdev", AVG("inventory"."inv_quantity_on_hand") AS "mean" FROM "inventory" AS "inventory" + JOIN "date_dim" AS "date_dim" + ON "date_dim"."d_year" = 2002 AND "inventory"."inv_date_sk" = "date_dim"."d_date_sk" JOIN "item" AS "item" ON "inventory"."inv_item_sk" = "item"."i_item_sk" JOIN "warehouse" AS "warehouse" ON "inventory"."inv_warehouse_sk" = "warehouse"."w_warehouse_sk" - JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_year" = 2002 AND "inventory"."inv_date_sk" = "date_dim"."d_date_sk" GROUP BY "warehouse"."w_warehouse_name", "warehouse"."w_warehouse_sk", @@ -4803,16 +4864,16 @@ FROM "catalog_sales" AS "catalog_sales" LEFT JOIN "catalog_returns" AS "catalog_returns" ON "catalog_sales"."cs_item_sk" = "catalog_returns"."cr_item_sk" AND "catalog_sales"."cs_order_number" = "catalog_returns"."cr_order_number" -JOIN "warehouse" AS "warehouse" - ON "catalog_sales"."cs_warehouse_sk" = "warehouse"."w_warehouse_sk" -JOIN "item" AS "item" - ON "item"."i_current_price" <= 1.49 - AND "item"."i_current_price" >= 0.99 - AND "item"."i_item_sk" = "catalog_sales"."cs_item_sk" JOIN "date_dim" AS "date_dim" ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" AND CAST("date_dim"."d_date" AS DATE) <= CAST('2002-07-01' AS DATE) AND CAST("date_dim"."d_date" AS DATE) >= CAST('2002-05-02' AS DATE) +JOIN "item" AS "item" + ON "item"."i_current_price" <= 1.49 + AND "item"."i_current_price" >= 0.99 + AND "item"."i_item_sk" = "catalog_sales"."cs_item_sk" +JOIN "warehouse" AS "warehouse" + ON "catalog_sales"."cs_warehouse_sk" = "warehouse"."w_warehouse_sk" GROUP BY "warehouse"."w_state", "item"."i_item_id" @@ -4890,7 +4951,7 @@ WHERE i_manufact_id BETWEEN 765 AND 765 + 40 ORDER BY i_product_name LIMIT 100; SELECT DISTINCT - "i1"."i_product_name" AS "_col_0" + "i1"."i_product_name" AS "i_product_name" FROM "item" AS "i1" WHERE "i1"."i_manufact_id" <= 805 @@ -5035,24 +5096,24 @@ ORDER BY Sum(ss_ext_sales_price) DESC, item.i_category LIMIT 100; SELECT - "date_dim"."d_year" AS "d_year", + "dt"."d_year" AS "d_year", "item"."i_category_id" AS "i_category_id", "item"."i_category" AS "i_category", SUM("store_sales"."ss_ext_sales_price") AS "_col_3" -FROM "date_dim" AS "date_dim" +FROM "date_dim" AS "dt" JOIN "store_sales" AS "store_sales" - ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk" + ON "dt"."d_date_sk" = "store_sales"."ss_sold_date_sk" JOIN "item" AS "item" ON "item"."i_manager_id" = 1 AND "store_sales"."ss_item_sk" = "item"."i_item_sk" WHERE - "date_dim"."d_moy" = 12 AND "date_dim"."d_year" = 2000 + "dt"."d_moy" = 12 AND "dt"."d_year" = 2000 GROUP BY - "date_dim"."d_year", + "dt"."d_year", "item"."i_category_id", "item"."i_category" ORDER BY SUM("store_sales"."ss_ext_sales_price") DESC, - "date_dim"."d_year", + "dt"."d_year", "item"."i_category_id", "item"."i_category" LIMIT 100; @@ -5278,11 +5339,6 @@ WITH "_u_0" AS ( "v2"."item_sk" AS "item_sk", RANK() OVER (ORDER BY 
"v2"."rank_col" DESC) AS "rnk" FROM "v2" AS "v2" -), "i1" AS ( - SELECT - "item"."i_item_sk" AS "i_item_sk", - "item"."i_product_name" AS "i_product_name" - FROM "item" AS "item" ) SELECT "v11"."rnk" AS "rnk", @@ -5291,9 +5347,9 @@ SELECT FROM "v11" AS "v11" JOIN "v21" AS "v21" ON "v11"."rnk" = "v21"."rnk" AND "v21"."rnk" < 11 -JOIN "i1" AS "i1" +JOIN "item" AS "i1" ON "i1"."i_item_sk" = "v11"."item_sk" -JOIN "i1" AS "i2" +JOIN "item" AS "i2" ON "i2"."i_item_sk" = "v21"."item_sk" WHERE "v11"."rnk" < 11 @@ -5347,8 +5403,6 @@ SELECT FROM "web_sales" AS "web_sales" JOIN "customer" AS "customer" ON "web_sales"."ws_bill_customer_sk" = "customer"."c_customer_sk" -JOIN "customer_address" AS "customer_address" - ON "customer"."c_current_addr_sk" = "customer_address"."ca_address_sk" JOIN "date_dim" AS "date_dim" ON "date_dim"."d_qoy" = 1 AND "date_dim"."d_year" = 2000 @@ -5357,6 +5411,8 @@ JOIN "item" AS "item" ON "web_sales"."ws_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_0" AS "_u_0" ON "item"."i_item_id" = "_u_0"."i_item_id" +JOIN "customer_address" AS "customer_address" + ON "customer"."c_current_addr_sk" = "customer_address"."ca_address_sk" WHERE NOT "_u_0"."i_item_id" IS NULL OR SUBSTR("customer_address"."ca_zip", 1, 5) IN ('85669', '86197', '88274', '83405', '86475', '85392', '85460', '80348', '81792') @@ -5414,12 +5470,7 @@ ORDER BY c_last_name, bought_city, ss_ticket_number LIMIT 100; -WITH "customer_address_2" AS ( - SELECT - "customer_address"."ca_address_sk" AS "ca_address_sk", - "customer_address"."ca_city" AS "ca_city" - FROM "customer_address" AS "customer_address" -), "dn" AS ( +WITH "dn" AS ( SELECT "store_sales"."ss_ticket_number" AS "ss_ticket_number", "store_sales"."ss_customer_sk" AS "ss_customer_sk", @@ -5427,21 +5478,21 @@ WITH "customer_address_2" AS ( SUM("store_sales"."ss_coupon_amt") AS "amt", SUM("store_sales"."ss_net_profit") AS "profit" FROM "store_sales" AS "store_sales" + JOIN "customer_address" AS "customer_address" + ON "store_sales"."ss_addr_sk" = "customer_address"."ca_address_sk" JOIN "date_dim" AS "date_dim" ON "date_dim"."d_dow" IN (6, 0) AND "date_dim"."d_year" IN (2000, 2001, 2002) AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "store" AS "store" - ON "store"."s_city" IN ('Midway', 'Fairview', 'Fairview', 'Fairview', 'Fairview') - AND "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "household_demographics" AS "household_demographics" ON ( "household_demographics"."hd_dep_count" = 6 OR "household_demographics"."hd_vehicle_count" = 0 ) AND "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" - JOIN "customer_address_2" AS "customer_address" - ON "store_sales"."ss_addr_sk" = "customer_address"."ca_address_sk" + JOIN "store" AS "store" + ON "store"."s_city" IN ('Midway', 'Fairview', 'Fairview', 'Fairview', 'Fairview') + AND "store_sales"."ss_store_sk" = "store"."s_store_sk" GROUP BY "store_sales"."ss_ticket_number", "store_sales"."ss_customer_sk", @@ -5457,11 +5508,11 @@ SELECT "dn"."amt" AS "amt", "dn"."profit" AS "profit" FROM "dn" AS "dn" -JOIN "customer_address_2" AS "current_addr" - ON "current_addr"."ca_city" <> "dn"."bought_city" JOIN "customer" AS "customer" - ON "customer"."c_current_addr_sk" = "current_addr"."ca_address_sk" - AND "dn"."ss_customer_sk" = "customer"."c_customer_sk" + ON "dn"."ss_customer_sk" = "customer"."c_customer_sk" +JOIN "customer_address" AS "current_addr" + ON "current_addr"."ca_city" <> "dn"."bought_city" + AND "customer"."c_current_addr_sk" = "current_addr"."ca_address_sk" ORDER BY 
"c_last_name", "c_first_name", @@ -5655,30 +5706,6 @@ WHERE s_store_sk = ss_store_sk SELECT SUM("store_sales"."ss_quantity") AS "_col_0" FROM "store_sales" AS "store_sales" -JOIN "store" AS "store" - ON "store"."s_store_sk" = "store_sales"."ss_store_sk" -JOIN "customer_demographics" AS "customer_demographics" - ON ( - "customer_demographics"."cd_demo_sk" = "store_sales"."ss_cdemo_sk" - AND "customer_demographics"."cd_education_status" = '2 yr Degree' - AND "customer_demographics"."cd_marital_status" = 'D' - AND "store_sales"."ss_sales_price" <= 200.00 - AND "store_sales"."ss_sales_price" >= 150.00 - ) - OR ( - "customer_demographics"."cd_demo_sk" = "store_sales"."ss_cdemo_sk" - AND "customer_demographics"."cd_education_status" = 'Advanced Degree' - AND "customer_demographics"."cd_marital_status" = 'M' - AND "store_sales"."ss_sales_price" <= 100.00 - AND "store_sales"."ss_sales_price" >= 50.00 - ) - OR ( - "customer_demographics"."cd_demo_sk" = "store_sales"."ss_cdemo_sk" - AND "customer_demographics"."cd_education_status" = 'Secondary' - AND "customer_demographics"."cd_marital_status" = 'W' - AND "store_sales"."ss_sales_price" <= 150.00 - AND "store_sales"."ss_sales_price" >= 100.00 - ) JOIN "customer_address" AS "customer_address" ON ( "customer_address"."ca_country" = 'United States' @@ -5701,9 +5728,33 @@ JOIN "customer_address" AS "customer_address" AND "store_sales"."ss_net_profit" <= 2000 AND "store_sales"."ss_net_profit" >= 0 ) +JOIN "customer_demographics" AS "customer_demographics" + ON ( + "customer_demographics"."cd_demo_sk" = "store_sales"."ss_cdemo_sk" + AND "customer_demographics"."cd_education_status" = '2 yr Degree' + AND "customer_demographics"."cd_marital_status" = 'D' + AND "store_sales"."ss_sales_price" <= 200.00 + AND "store_sales"."ss_sales_price" >= 150.00 + ) + OR ( + "customer_demographics"."cd_demo_sk" = "store_sales"."ss_cdemo_sk" + AND "customer_demographics"."cd_education_status" = 'Advanced Degree' + AND "customer_demographics"."cd_marital_status" = 'M' + AND "store_sales"."ss_sales_price" <= 100.00 + AND "store_sales"."ss_sales_price" >= 50.00 + ) + OR ( + "customer_demographics"."cd_demo_sk" = "store_sales"."ss_cdemo_sk" + AND "customer_demographics"."cd_education_status" = 'Secondary' + AND "customer_demographics"."cd_marital_status" = 'W' + AND "store_sales"."ss_sales_price" <= 150.00 + AND "store_sales"."ss_sales_price" >= 100.00 + ) JOIN "date_dim" AS "date_dim" ON "date_dim"."d_year" = 1999 - AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk"; + AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" +JOIN "store" AS "store" + ON "store"."s_store_sk" = "store_sales"."ss_store_sk"; -------------------------------------- -- TPC-DS 49 @@ -5850,22 +5901,22 @@ WITH "date_dim_2" AS ( "date_dim"."d_moy" = 12 AND "date_dim"."d_year" = 1999 ), "in_web" AS ( SELECT - "web_sales"."ws_item_sk" AS "item", - CAST(SUM(COALESCE("web_returns"."wr_return_quantity", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("web_sales"."ws_quantity", 0)) AS DECIMAL(15, 4)) AS "return_ratio", - CAST(SUM(COALESCE("web_returns"."wr_return_amt", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("web_sales"."ws_net_paid", 0)) AS DECIMAL(15, 4)) AS "currency_ratio" - FROM "web_sales" AS "web_sales" - LEFT JOIN "web_returns" AS "web_returns" - ON "web_sales"."ws_item_sk" = "web_returns"."wr_item_sk" - AND "web_sales"."ws_order_number" = "web_returns"."wr_order_number" + "ws"."ws_item_sk" AS "item", + CAST(SUM(COALESCE("wr"."wr_return_quantity", 0)) AS DECIMAL(15, 4)) / 
CAST(SUM(COALESCE("ws"."ws_quantity", 0)) AS DECIMAL(15, 4)) AS "return_ratio", + CAST(SUM(COALESCE("wr"."wr_return_amt", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("ws"."ws_net_paid", 0)) AS DECIMAL(15, 4)) AS "currency_ratio" + FROM "web_sales" AS "ws" JOIN "date_dim_2" AS "date_dim" - ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" + ON "ws"."ws_sold_date_sk" = "date_dim"."d_date_sk" + LEFT JOIN "web_returns" AS "wr" + ON "ws"."ws_item_sk" = "wr"."wr_item_sk" + AND "ws"."ws_order_number" = "wr"."wr_order_number" WHERE - "web_returns"."wr_return_amt" > 10000 - AND "web_sales"."ws_net_paid" > 0 - AND "web_sales"."ws_net_profit" > 1 - AND "web_sales"."ws_quantity" > 0 + "wr"."wr_return_amt" > 10000 + AND "ws"."ws_net_paid" > 0 + AND "ws"."ws_net_profit" > 1 + AND "ws"."ws_quantity" > 0 GROUP BY - "web_sales"."ws_item_sk" + "ws"."ws_item_sk" ), "web" AS ( SELECT "in_web"."item" AS "item", @@ -5875,22 +5926,22 @@ WITH "date_dim_2" AS ( FROM "in_web" AS "in_web" ), "in_cat" AS ( SELECT - "catalog_sales"."cs_item_sk" AS "item", - CAST(SUM(COALESCE("catalog_returns"."cr_return_quantity", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("catalog_sales"."cs_quantity", 0)) AS DECIMAL(15, 4)) AS "return_ratio", - CAST(SUM(COALESCE("catalog_returns"."cr_return_amount", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("catalog_sales"."cs_net_paid", 0)) AS DECIMAL(15, 4)) AS "currency_ratio" - FROM "catalog_sales" AS "catalog_sales" - LEFT JOIN "catalog_returns" AS "catalog_returns" - ON "catalog_sales"."cs_item_sk" = "catalog_returns"."cr_item_sk" - AND "catalog_sales"."cs_order_number" = "catalog_returns"."cr_order_number" + "cs"."cs_item_sk" AS "item", + CAST(SUM(COALESCE("cr"."cr_return_quantity", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("cs"."cs_quantity", 0)) AS DECIMAL(15, 4)) AS "return_ratio", + CAST(SUM(COALESCE("cr"."cr_return_amount", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("cs"."cs_net_paid", 0)) AS DECIMAL(15, 4)) AS "currency_ratio" + FROM "catalog_sales" AS "cs" + LEFT JOIN "catalog_returns" AS "cr" + ON "cs"."cs_item_sk" = "cr"."cr_item_sk" + AND "cs"."cs_order_number" = "cr"."cr_order_number" JOIN "date_dim_2" AS "date_dim" - ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" + ON "cs"."cs_sold_date_sk" = "date_dim"."d_date_sk" WHERE - "catalog_returns"."cr_return_amount" > 10000 - AND "catalog_sales"."cs_net_paid" > 0 - AND "catalog_sales"."cs_net_profit" > 1 - AND "catalog_sales"."cs_quantity" > 0 + "cr"."cr_return_amount" > 10000 + AND "cs"."cs_net_paid" > 0 + AND "cs"."cs_net_profit" > 1 + AND "cs"."cs_quantity" > 0 GROUP BY - "catalog_sales"."cs_item_sk" + "cs"."cs_item_sk" ), "catalog" AS ( SELECT "in_cat"."item" AS "item", @@ -5900,22 +5951,22 @@ WITH "date_dim_2" AS ( FROM "in_cat" AS "in_cat" ), "in_store" AS ( SELECT - "store_sales"."ss_item_sk" AS "item", - CAST(SUM(COALESCE("store_returns"."sr_return_quantity", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("store_sales"."ss_quantity", 0)) AS DECIMAL(15, 4)) AS "return_ratio", - CAST(SUM(COALESCE("store_returns"."sr_return_amt", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("store_sales"."ss_net_paid", 0)) AS DECIMAL(15, 4)) AS "currency_ratio" - FROM "store_sales" AS "store_sales" - LEFT JOIN "store_returns" AS "store_returns" - ON "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" - AND "store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" + "sts"."ss_item_sk" AS "item", + CAST(SUM(COALESCE("sr"."sr_return_quantity", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("sts"."ss_quantity", 0)) AS 
DECIMAL(15, 4)) AS "return_ratio", + CAST(SUM(COALESCE("sr"."sr_return_amt", 0)) AS DECIMAL(15, 4)) / CAST(SUM(COALESCE("sts"."ss_net_paid", 0)) AS DECIMAL(15, 4)) AS "currency_ratio" + FROM "store_sales" AS "sts" JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" + ON "sts"."ss_sold_date_sk" = "date_dim"."d_date_sk" + LEFT JOIN "store_returns" AS "sr" + ON "sts"."ss_item_sk" = "sr"."sr_item_sk" + AND "sts"."ss_ticket_number" = "sr"."sr_ticket_number" WHERE - "store_returns"."sr_return_amt" > 10000 - AND "store_sales"."ss_net_paid" > 0 - AND "store_sales"."ss_net_profit" > 1 - AND "store_sales"."ss_quantity" > 0 + "sr"."sr_return_amt" > 10000 + AND "sts"."ss_net_paid" > 0 + AND "sts"."ss_net_profit" > 1 + AND "sts"."ss_quantity" > 0 GROUP BY - "store_sales"."ss_item_sk" + "sts"."ss_item_sk" ), "store" AS ( SELECT "in_store"."item" AS "item", @@ -6098,18 +6149,18 @@ SELECT END ) AS ">120 days" FROM "store_sales" AS "store_sales" +JOIN "date_dim" AS "d1" + ON "store_sales"."ss_sold_date_sk" = "d1"."d_date_sk" +JOIN "store" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "store_returns" AS "store_returns" ON "store_sales"."ss_customer_sk" = "store_returns"."sr_customer_sk" AND "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" AND "store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" -JOIN "store" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" -JOIN "date_dim" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" -JOIN "date_dim" AS "date_dim_2" - ON "date_dim_2"."d_moy" = 9 - AND "date_dim_2"."d_year" = 2002 - AND "store_returns"."sr_returned_date_sk" = "date_dim_2"."d_date_sk" +JOIN "date_dim" AS "d2" + ON "d2"."d_moy" = 9 + AND "d2"."d_year" = 2002 + AND "store_returns"."sr_returned_date_sk" = "d2"."d_date_sk" GROUP BY "store"."s_store_name", "store"."s_company_id", @@ -6286,23 +6337,23 @@ ORDER BY dt.d_year, brand_id LIMIT 100; SELECT - "date_dim"."d_year" AS "d_year", + "dt"."d_year" AS "d_year", "item"."i_brand_id" AS "brand_id", "item"."i_brand" AS "brand", SUM("store_sales"."ss_ext_sales_price") AS "ext_price" -FROM "date_dim" AS "date_dim" +FROM "date_dim" AS "dt" JOIN "store_sales" AS "store_sales" - ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk" + ON "dt"."d_date_sk" = "store_sales"."ss_sold_date_sk" JOIN "item" AS "item" ON "item"."i_manager_id" = 1 AND "store_sales"."ss_item_sk" = "item"."i_item_sk" WHERE - "date_dim"."d_moy" = 11 AND "date_dim"."d_year" = 1999 + "dt"."d_moy" = 11 AND "dt"."d_year" = 1999 GROUP BY - "date_dim"."d_year", + "dt"."d_year", "item"."i_brand", "item"."i_brand_id" ORDER BY - "date_dim"."d_year", + "dt"."d_year", "ext_price" DESC, "brand_id" LIMIT 100; @@ -6502,16 +6553,16 @@ WITH "cs_or_ws_sales" AS ( "customer"."c_customer_sk" AS "c_customer_sk", "customer"."c_current_addr_sk" AS "c_current_addr_sk" FROM "cs_or_ws_sales" AS "cs_or_ws_sales" - JOIN "item" AS "item" - ON "cs_or_ws_sales"."item_sk" = "item"."i_item_sk" - AND "item"."i_category" = 'Sports' - AND "item"."i_class" = 'fitness' + JOIN "customer" AS "customer" + ON "customer"."c_customer_sk" = "cs_or_ws_sales"."customer_sk" JOIN "date_dim" AS "date_dim" ON "cs_or_ws_sales"."sold_date_sk" = "date_dim"."d_date_sk" AND "date_dim"."d_moy" = 5 AND "date_dim"."d_year" = 2000 - JOIN "customer" AS "customer" - ON "customer"."c_customer_sk" = "cs_or_ws_sales"."customer_sk" + JOIN "item" AS "item" + ON "cs_or_ws_sales"."item_sk" = "item"."i_item_sk" + AND 
"item"."i_category" = 'Sports' + AND "item"."i_class" = 'fitness' ), "_u_0" AS ( SELECT DISTINCT "date_dim"."d_month_seq" + 1 AS "_col_0" @@ -6528,12 +6579,12 @@ WITH "cs_or_ws_sales" AS ( SELECT SUM("store_sales"."ss_ext_sales_price") AS "revenue" FROM "my_customers" - CROSS JOIN "date_dim" AS "date_dim" - JOIN "store_sales" AS "store_sales" - ON "my_customers"."c_customer_sk" = "store_sales"."ss_customer_sk" - AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address" AS "customer_address" ON "my_customers"."c_current_addr_sk" = "customer_address"."ca_address_sk" + JOIN "store_sales" AS "store_sales" + ON "my_customers"."c_customer_sk" = "store_sales"."ss_customer_sk" + JOIN "date_dim" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "store" AS "store" ON "customer_address"."ca_county" = "store"."s_county" AND "customer_address"."ca_state" = "store"."s_state" @@ -6670,7 +6721,14 @@ FROM (SELECT * GROUP BY i_item_id ORDER BY total_sales LIMIT 100; -WITH "date_dim_2" AS ( +WITH "customer_address_2" AS ( + SELECT + "customer_address"."ca_address_sk" AS "ca_address_sk", + "customer_address"."ca_gmt_offset" AS "ca_gmt_offset" + FROM "customer_address" AS "customer_address" + WHERE + "customer_address"."ca_gmt_offset" = -6 +), "date_dim_2" AS ( SELECT "date_dim"."d_date_sk" AS "d_date_sk", "date_dim"."d_year" AS "d_year", @@ -6678,13 +6736,6 @@ WITH "date_dim_2" AS ( FROM "date_dim" AS "date_dim" WHERE "date_dim"."d_moy" = 3 AND "date_dim"."d_year" = 1998 -), "customer_address_2" AS ( - SELECT - "customer_address"."ca_address_sk" AS "ca_address_sk", - "customer_address"."ca_gmt_offset" AS "ca_gmt_offset" - FROM "customer_address" AS "customer_address" - WHERE - "customer_address"."ca_gmt_offset" = -6 ), "item_2" AS ( SELECT "item"."i_item_sk" AS "i_item_sk", @@ -6703,10 +6754,10 @@ WITH "date_dim_2" AS ( "item"."i_item_id" AS "i_item_id", SUM("store_sales"."ss_ext_sales_price") AS "total_sales" FROM "store_sales" AS "store_sales" - JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON "store_sales"."ss_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "item_2" AS "item" ON "store_sales"."ss_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_0" AS "_u_0" @@ -6720,10 +6771,10 @@ WITH "date_dim_2" AS ( "item"."i_item_id" AS "i_item_id", SUM("catalog_sales"."cs_ext_sales_price") AS "total_sales" FROM "catalog_sales" AS "catalog_sales" - JOIN "date_dim_2" AS "date_dim" - ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON "catalog_sales"."cs_bill_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" JOIN "item_2" AS "item" ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_0" AS "_u_1" @@ -6737,10 +6788,10 @@ WITH "date_dim_2" AS ( "item"."i_item_id" AS "i_item_id", SUM("web_sales"."ws_ext_sales_price") AS "total_sales" FROM "web_sales" AS "web_sales" - JOIN "date_dim_2" AS "date_dim" - ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON "web_sales"."ws_bill_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" JOIN "item_2" AS "item" ON 
"web_sales"."ws_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_0" AS "_u_2" @@ -6860,6 +6911,8 @@ WITH "v1" AS ( FROM "item" AS "item" JOIN "catalog_sales" AS "catalog_sales" ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" + JOIN "call_center" AS "call_center" + ON "call_center"."cc_call_center_sk" = "catalog_sales"."cs_call_center_sk" JOIN "date_dim" AS "date_dim" ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" AND ( @@ -6874,8 +6927,6 @@ WITH "v1" AS ( AND ( "date_dim"."d_year" = 1999 OR "date_dim"."d_year" = 2000 OR "date_dim"."d_year" = 2001 ) - JOIN "call_center" AS "call_center" - ON "call_center"."cc_call_center_sk" = "catalog_sales"."cs_call_center_sk" GROUP BY "item"."i_category", "item"."i_brand", @@ -6914,6 +6965,205 @@ ORDER BY "v1"."avg_monthly_sales" LIMIT 100; +-------------------------------------- +-- TPC-DS 58 +-------------------------------------- +WITH ss_items + AS (SELECT i_item_id item_id, + Sum(ss_ext_sales_price) ss_item_rev + FROM store_sales, + item, + date_dim + WHERE ss_item_sk = i_item_sk + AND d_date IN (SELECT d_date + FROM date_dim + WHERE d_week_seq = (SELECT d_week_seq + FROM date_dim + WHERE d_date = '2002-02-25' + )) + AND ss_sold_date_sk = d_date_sk + GROUP BY i_item_id), + cs_items + AS (SELECT i_item_id item_id, + Sum(cs_ext_sales_price) cs_item_rev + FROM catalog_sales, + item, + date_dim + WHERE cs_item_sk = i_item_sk + AND d_date IN (SELECT d_date + FROM date_dim + WHERE d_week_seq = (SELECT d_week_seq + FROM date_dim + WHERE d_date = '2002-02-25' + )) + AND cs_sold_date_sk = d_date_sk + GROUP BY i_item_id), + ws_items + AS (SELECT i_item_id item_id, + Sum(ws_ext_sales_price) ws_item_rev + FROM web_sales, + item, + date_dim + WHERE ws_item_sk = i_item_sk + AND d_date IN (SELECT d_date + FROM date_dim + WHERE d_week_seq = (SELECT d_week_seq + FROM date_dim + WHERE d_date = '2002-02-25' + )) + AND ws_sold_date_sk = d_date_sk + GROUP BY i_item_id) +SELECT ss_items.item_id, + ss_item_rev, + ss_item_rev / ( ss_item_rev + cs_item_rev + ws_item_rev ) / 3 * + 100 ss_dev, + cs_item_rev, + cs_item_rev / ( ss_item_rev + cs_item_rev + ws_item_rev ) / 3 * + 100 cs_dev, + ws_item_rev, + ws_item_rev / ( ss_item_rev + cs_item_rev + ws_item_rev ) / 3 * + 100 ws_dev, + ( ss_item_rev + cs_item_rev + ws_item_rev ) / 3 + average +FROM ss_items, + cs_items, + ws_items +WHERE ss_items.item_id = cs_items.item_id + AND ss_items.item_id = ws_items.item_id + AND ss_item_rev BETWEEN 0.9 * cs_item_rev AND 1.1 * cs_item_rev + AND ss_item_rev BETWEEN 0.9 * ws_item_rev AND 1.1 * ws_item_rev + AND cs_item_rev BETWEEN 0.9 * ss_item_rev AND 1.1 * ss_item_rev + AND cs_item_rev BETWEEN 0.9 * ws_item_rev AND 1.1 * ws_item_rev + AND ws_item_rev BETWEEN 0.9 * ss_item_rev AND 1.1 * ss_item_rev + AND ws_item_rev BETWEEN 0.9 * cs_item_rev AND 1.1 * cs_item_rev +ORDER BY item_id, + ss_item_rev +LIMIT 100; +WITH "date_dim_2" AS ( + SELECT + "date_dim"."d_date_sk" AS "d_date_sk", + "date_dim"."d_date" AS "d_date" + FROM "date_dim" AS "date_dim" +), "item_2" AS ( + SELECT + "item"."i_item_sk" AS "i_item_sk", + "item"."i_item_id" AS "i_item_id" + FROM "item" AS "item" +), "_u_0" AS ( + SELECT + "date_dim"."d_week_seq" AS "d_week_seq" + FROM "date_dim" AS "date_dim" + WHERE + "date_dim"."d_date" = '2002-02-25' +), "_u_1" AS ( + SELECT + "date_dim"."d_date" AS "d_date" + FROM "date_dim" AS "date_dim" + JOIN "_u_0" AS "_u_0" + ON "date_dim"."d_week_seq" = "_u_0"."d_week_seq" + GROUP BY + "date_dim"."d_date" +), "ss_items" AS ( + SELECT + "item"."i_item_id" AS "item_id", + 
SUM("store_sales"."ss_ext_sales_price") AS "ss_item_rev" + FROM "store_sales" AS "store_sales" + JOIN "date_dim_2" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "store_sales"."ss_item_sk" = "item"."i_item_sk" + LEFT JOIN "_u_1" AS "_u_1" + ON "date_dim"."d_date" = "_u_1"."d_date" + WHERE + NOT "_u_1"."d_date" IS NULL + GROUP BY + "item"."i_item_id" +), "_u_3" AS ( + SELECT + "date_dim"."d_date" AS "d_date" + FROM "date_dim" AS "date_dim" + JOIN "_u_0" AS "_u_2" + ON "date_dim"."d_week_seq" = "_u_2"."d_week_seq" + GROUP BY + "date_dim"."d_date" +), "cs_items" AS ( + SELECT + "item"."i_item_id" AS "item_id", + SUM("catalog_sales"."cs_ext_sales_price") AS "cs_item_rev" + FROM "catalog_sales" AS "catalog_sales" + JOIN "date_dim_2" AS "date_dim" + ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" + LEFT JOIN "_u_3" AS "_u_3" + ON "date_dim"."d_date" = "_u_3"."d_date" + WHERE + NOT "_u_3"."d_date" IS NULL + GROUP BY + "item"."i_item_id" +), "_u_5" AS ( + SELECT + "date_dim"."d_date" AS "d_date" + FROM "date_dim" AS "date_dim" + JOIN "_u_0" AS "_u_4" + ON "date_dim"."d_week_seq" = "_u_4"."d_week_seq" + GROUP BY + "date_dim"."d_date" +), "ws_items" AS ( + SELECT + "item"."i_item_id" AS "item_id", + SUM("web_sales"."ws_ext_sales_price") AS "ws_item_rev" + FROM "web_sales" AS "web_sales" + JOIN "date_dim_2" AS "date_dim" + ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "web_sales"."ws_item_sk" = "item"."i_item_sk" + LEFT JOIN "_u_5" AS "_u_5" + ON "date_dim"."d_date" = "_u_5"."d_date" + WHERE + NOT "_u_5"."d_date" IS NULL + GROUP BY + "item"."i_item_id" +) +SELECT + "ss_items"."item_id" AS "item_id", + "ss_items"."ss_item_rev" AS "ss_item_rev", + "ss_items"."ss_item_rev" / ( + "ss_items"."ss_item_rev" + "cs_items"."cs_item_rev" + "ws_items"."ws_item_rev" + ) / 3 * 100 AS "ss_dev", + "cs_items"."cs_item_rev" AS "cs_item_rev", + "cs_items"."cs_item_rev" / ( + "ss_items"."ss_item_rev" + "cs_items"."cs_item_rev" + "ws_items"."ws_item_rev" + ) / 3 * 100 AS "cs_dev", + "ws_items"."ws_item_rev" AS "ws_item_rev", + "ws_items"."ws_item_rev" / ( + "ss_items"."ss_item_rev" + "cs_items"."cs_item_rev" + "ws_items"."ws_item_rev" + ) / 3 * 100 AS "ws_dev", + ( + "ss_items"."ss_item_rev" + "cs_items"."cs_item_rev" + "ws_items"."ws_item_rev" + ) / 3 AS "average" +FROM "ss_items" +JOIN "ws_items" + ON "ss_items"."item_id" = "ws_items"."item_id" + AND "ss_items"."ss_item_rev" <= 1.1 * "ws_items"."ws_item_rev" + AND "ss_items"."ss_item_rev" >= 0.9 * "ws_items"."ws_item_rev" + AND "ws_items"."ws_item_rev" <= 1.1 * "ss_items"."ss_item_rev" + AND "ws_items"."ws_item_rev" >= 0.9 * "ss_items"."ss_item_rev" +JOIN "cs_items" + ON "cs_items"."cs_item_rev" <= 1.1 * "ss_items"."ss_item_rev" + AND "cs_items"."cs_item_rev" <= 1.1 * "ws_items"."ws_item_rev" + AND "cs_items"."cs_item_rev" >= 0.9 * "ss_items"."ss_item_rev" + AND "cs_items"."cs_item_rev" >= 0.9 * "ws_items"."ws_item_rev" + AND "ss_items"."item_id" = "cs_items"."item_id" + AND "ss_items"."ss_item_rev" <= 1.1 * "cs_items"."cs_item_rev" + AND "ss_items"."ss_item_rev" >= 0.9 * "cs_items"."cs_item_rev" + AND "ws_items"."ws_item_rev" <= 1.1 * "cs_items"."cs_item_rev" + AND "ws_items"."ws_item_rev" >= 0.9 * "cs_items"."cs_item_rev" +ORDER BY + "item_id", + "ss_item_rev" +LIMIT 100; + -------------------------------------- -- TPC-DS 59 -------------------------------------- @@ 
-7072,12 +7322,12 @@ WITH "wss" AS ( "wss"."fri_sales" AS "fri_sales2", "wss"."sat_sales" AS "sat_sales2" FROM "wss" + JOIN "date_dim" AS "d" + ON "d"."d_month_seq" <= 1219 + AND "d"."d_month_seq" >= 1208 + AND "d"."d_week_seq" = "wss"."d_week_seq" JOIN "store" AS "store" ON "wss"."ss_store_sk" = "store"."s_store_sk" - JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_month_seq" <= 1219 - AND "date_dim"."d_month_seq" >= 1208 - AND "date_dim"."d_week_seq" = "wss"."d_week_seq" ) SELECT "store"."s_store_name" AS "s_store_name1", @@ -7091,12 +7341,12 @@ SELECT "wss"."fri_sales" / "x"."fri_sales2" AS "_col_8", "wss"."sat_sales" / "x"."sat_sales2" AS "_col_9" FROM "wss" +JOIN "date_dim" AS "d" + ON "d"."d_month_seq" <= 1207 + AND "d"."d_month_seq" >= 1196 + AND "d"."d_week_seq" = "wss"."d_week_seq" JOIN "store" AS "store" ON "wss"."ss_store_sk" = "store"."s_store_sk" -JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_month_seq" <= 1207 - AND "date_dim"."d_month_seq" >= 1196 - AND "date_dim"."d_week_seq" = "wss"."d_week_seq" JOIN "x" AS "x" ON "store"."s_store_id" = "x"."s_store_id2" AND "wss"."d_week_seq" = "x"."d_week_seq2" - 52 @@ -7174,7 +7424,14 @@ GROUP BY i_item_id ORDER BY i_item_id, total_sales LIMIT 100; -WITH "date_dim_2" AS ( +WITH "customer_address_2" AS ( + SELECT + "customer_address"."ca_address_sk" AS "ca_address_sk", + "customer_address"."ca_gmt_offset" AS "ca_gmt_offset" + FROM "customer_address" AS "customer_address" + WHERE + "customer_address"."ca_gmt_offset" = -6 +), "date_dim_2" AS ( SELECT "date_dim"."d_date_sk" AS "d_date_sk", "date_dim"."d_year" AS "d_year", @@ -7182,13 +7439,6 @@ WITH "date_dim_2" AS ( FROM "date_dim" AS "date_dim" WHERE "date_dim"."d_moy" = 8 AND "date_dim"."d_year" = 1999 -), "customer_address_2" AS ( - SELECT - "customer_address"."ca_address_sk" AS "ca_address_sk", - "customer_address"."ca_gmt_offset" AS "ca_gmt_offset" - FROM "customer_address" AS "customer_address" - WHERE - "customer_address"."ca_gmt_offset" = -6 ), "item_2" AS ( SELECT "item"."i_item_sk" AS "i_item_sk", @@ -7207,10 +7457,10 @@ WITH "date_dim_2" AS ( "item"."i_item_id" AS "i_item_id", SUM("store_sales"."ss_ext_sales_price") AS "total_sales" FROM "store_sales" AS "store_sales" - JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON "store_sales"."ss_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "item_2" AS "item" ON "store_sales"."ss_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_0" AS "_u_0" @@ -7224,10 +7474,10 @@ WITH "date_dim_2" AS ( "item"."i_item_id" AS "i_item_id", SUM("catalog_sales"."cs_ext_sales_price") AS "total_sales" FROM "catalog_sales" AS "catalog_sales" - JOIN "date_dim_2" AS "date_dim" - ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON "catalog_sales"."cs_bill_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" JOIN "item_2" AS "item" ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_0" AS "_u_1" @@ -7241,10 +7491,10 @@ WITH "date_dim_2" AS ( "item"."i_item_id" AS "i_item_id", SUM("web_sales"."ws_ext_sales_price") AS "total_sales" FROM "web_sales" AS "web_sales" - JOIN "date_dim_2" AS "date_dim" - ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_address_2" AS "customer_address" ON 
"web_sales"."ws_bill_addr_sk" = "customer_address"."ca_address_sk" + JOIN "date_dim_2" AS "date_dim" + ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" JOIN "item_2" AS "item" ON "web_sales"."ws_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_0" AS "_u_2" @@ -7334,13 +7584,11 @@ FROM (SELECT Sum(ss_ext_sales_price) promotions ORDER BY promotions, total LIMIT 100; -WITH "store_2" AS ( +WITH "customer_2" AS ( SELECT - "store"."s_store_sk" AS "s_store_sk", - "store"."s_gmt_offset" AS "s_gmt_offset" - FROM "store" AS "store" - WHERE - "store"."s_gmt_offset" = -7 + "customer"."c_customer_sk" AS "c_customer_sk", + "customer"."c_current_addr_sk" AS "c_current_addr_sk" + FROM "customer" AS "customer" ), "date_dim_2" AS ( SELECT "date_dim"."d_date_sk" AS "d_date_sk", @@ -7349,11 +7597,20 @@ WITH "store_2" AS ( FROM "date_dim" AS "date_dim" WHERE "date_dim"."d_moy" = 12 AND "date_dim"."d_year" = 2001 -), "customer_2" AS ( +), "item_2" AS ( SELECT - "customer"."c_customer_sk" AS "c_customer_sk", - "customer"."c_current_addr_sk" AS "c_current_addr_sk" - FROM "customer" AS "customer" + "item"."i_item_sk" AS "i_item_sk", + "item"."i_category" AS "i_category" + FROM "item" AS "item" + WHERE + "item"."i_category" = 'Books' +), "store_2" AS ( + SELECT + "store"."s_store_sk" AS "s_store_sk", + "store"."s_gmt_offset" AS "s_gmt_offset" + FROM "store" AS "store" + WHERE + "store"."s_gmt_offset" = -7 ), "customer_address_2" AS ( SELECT "customer_address"."ca_address_sk" AS "ca_address_sk", @@ -7361,19 +7618,16 @@ WITH "store_2" AS ( FROM "customer_address" AS "customer_address" WHERE "customer_address"."ca_gmt_offset" = -7 -), "item_2" AS ( - SELECT - "item"."i_item_sk" AS "i_item_sk", - "item"."i_category" AS "i_category" - FROM "item" AS "item" - WHERE - "item"."i_category" = 'Books' ), "promotional_sales" AS ( SELECT SUM("store_sales"."ss_ext_sales_price") AS "promotions" FROM "store_sales" AS "store_sales" - JOIN "store_2" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" + JOIN "customer_2" AS "customer" + ON "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" + JOIN "date_dim_2" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "store_sales"."ss_item_sk" = "item"."i_item_sk" JOIN "promotion" AS "promotion" ON ( "promotion"."p_channel_dmail" = 'Y' @@ -7381,28 +7635,24 @@ WITH "store_2" AS ( OR "promotion"."p_channel_tv" = 'Y' ) AND "store_sales"."ss_promo_sk" = "promotion"."p_promo_sk" - JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "customer_2" AS "customer" - ON "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" + JOIN "store_2" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "customer_address_2" AS "customer_address" ON "customer_address"."ca_address_sk" = "customer"."c_current_addr_sk" - JOIN "item_2" AS "item" - ON "store_sales"."ss_item_sk" = "item"."i_item_sk" ), "all_sales" AS ( SELECT SUM("store_sales"."ss_ext_sales_price") AS "total" FROM "store_sales" AS "store_sales" - JOIN "store_2" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" - JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_2" AS "customer" ON "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" - JOIN "customer_address_2" AS "customer_address" - ON "customer_address"."ca_address_sk" = "customer"."c_current_addr_sk" + JOIN "date_dim_2" AS "date_dim" + ON 
"store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "item_2" AS "item" ON "store_sales"."ss_item_sk" = "item"."i_item_sk" + JOIN "store_2" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" + JOIN "customer_address_2" AS "customer_address" + ON "customer_address"."ca_address_sk" = "customer"."c_current_addr_sk" ) SELECT "promotional_sales"."promotions" AS "promotions", @@ -7505,16 +7755,16 @@ SELECT END ) AS ">120 days" FROM "web_sales" AS "web_sales" -JOIN "warehouse" AS "warehouse" - ON "web_sales"."ws_warehouse_sk" = "warehouse"."w_warehouse_sk" -JOIN "ship_mode" AS "ship_mode" - ON "web_sales"."ws_ship_mode_sk" = "ship_mode"."sm_ship_mode_sk" -JOIN "web_site" AS "web_site" - ON "web_sales"."ws_web_site_sk" = "web_site"."web_site_sk" JOIN "date_dim" AS "date_dim" ON "date_dim"."d_month_seq" <= 1233 AND "date_dim"."d_month_seq" >= 1222 AND "web_sales"."ws_ship_date_sk" = "date_dim"."d_date_sk" +JOIN "ship_mode" AS "ship_mode" + ON "web_sales"."ws_ship_mode_sk" = "ship_mode"."sm_ship_mode_sk" +JOIN "warehouse" AS "warehouse" + ON "web_sales"."ws_warehouse_sk" = "warehouse"."w_warehouse_sk" +JOIN "web_site" AS "web_site" + ON "web_sales"."ws_web_site_sk" = "web_site"."web_site_sk" GROUP BY SUBSTR("warehouse"."w_warehouse_name", 1, 20), "ship_mode"."sm_type", @@ -7780,33 +8030,6 @@ WITH "cs_ui" AS ( SUM("catalog_sales"."cs_ext_list_price") > 2 * SUM( "catalog_returns"."cr_refunded_cash" + "catalog_returns"."cr_reversed_charge" + "catalog_returns"."cr_store_credit" ) -), "d1" AS ( - SELECT - "date_dim"."d_date_sk" AS "d_date_sk", - "date_dim"."d_year" AS "d_year" - FROM "date_dim" AS "date_dim" -), "ib2" AS ( - SELECT - "income_band"."ib_income_band_sk" AS "ib_income_band_sk" - FROM "income_band" AS "income_band" -), "hd2" AS ( - SELECT - "household_demographics"."hd_demo_sk" AS "hd_demo_sk", - "household_demographics"."hd_income_band_sk" AS "hd_income_band_sk" - FROM "household_demographics" AS "household_demographics" -), "cd1" AS ( - SELECT - "customer_demographics"."cd_demo_sk" AS "cd_demo_sk", - "customer_demographics"."cd_marital_status" AS "cd_marital_status" - FROM "customer_demographics" AS "customer_demographics" -), "ad1" AS ( - SELECT - "customer_address"."ca_address_sk" AS "ca_address_sk", - "customer_address"."ca_street_number" AS "ca_street_number", - "customer_address"."ca_street_name" AS "ca_street_name", - "customer_address"."ca_city" AS "ca_city", - "customer_address"."ca_zip" AS "ca_zip" - FROM "customer_address" AS "customer_address" ), "cross_sales" AS ( SELECT "item"."i_product_name" AS "product_name", @@ -7827,45 +8050,45 @@ WITH "cs_ui" AS ( SUM("store_sales"."ss_list_price") AS "s2", SUM("store_sales"."ss_coupon_amt") AS "s3" FROM "store_sales" AS "store_sales" - JOIN "store_returns" AS "store_returns" - ON "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" - AND "store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" + CROSS JOIN "income_band" AS "ib2" + JOIN "customer_address" AS "ad1" + ON "store_sales"."ss_addr_sk" = "ad1"."ca_address_sk" JOIN "cs_ui" ON "store_sales"."ss_item_sk" = "cs_ui"."cs_item_sk" - JOIN "d1" AS "d1" + JOIN "date_dim" AS "d1" ON "store_sales"."ss_sold_date_sk" = "d1"."d_date_sk" - CROSS JOIN "ib2" AS "ib2" - JOIN "hd2" AS "hd2" - ON "hd2"."hd_income_band_sk" = "ib2"."ib_income_band_sk" - JOIN "customer" AS "customer" - ON "customer"."c_current_hdemo_sk" = "hd2"."hd_demo_sk" - AND "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" - JOIN "d1" AS "d2" - ON 
"customer"."c_first_sales_date_sk" = "d2"."d_date_sk" - JOIN "d1" AS "d3" - ON "customer"."c_first_shipto_date_sk" = "d3"."d_date_sk" - JOIN "store" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" - JOIN "cd1" AS "cd1" - ON "store_sales"."ss_cdemo_sk" = "cd1"."cd_demo_sk" - JOIN "cd1" AS "cd2" - ON "cd1"."cd_marital_status" <> "cd2"."cd_marital_status" - AND "customer"."c_current_cdemo_sk" = "cd2"."cd_demo_sk" - JOIN "promotion" AS "promotion" - ON "store_sales"."ss_promo_sk" = "promotion"."p_promo_sk" - JOIN "hd2" AS "hd1" + JOIN "household_demographics" AS "hd1" ON "store_sales"."ss_hdemo_sk" = "hd1"."hd_demo_sk" - JOIN "ad1" AS "ad1" - ON "store_sales"."ss_addr_sk" = "ad1"."ca_address_sk" - JOIN "ad1" AS "ad2" - ON "customer"."c_current_addr_sk" = "ad2"."ca_address_sk" - JOIN "ib2" AS "ib1" - ON "hd1"."hd_income_band_sk" = "ib1"."ib_income_band_sk" + JOIN "household_demographics" AS "hd2" + ON "hd2"."hd_income_band_sk" = "ib2"."ib_income_band_sk" JOIN "item" AS "item" ON "item"."i_color" IN ('cyan', 'peach', 'blush', 'frosted', 'powder', 'orange') AND "item"."i_current_price" <= 68 AND "item"."i_current_price" >= 59 AND "store_sales"."ss_item_sk" = "item"."i_item_sk" + JOIN "promotion" AS "promotion" + ON "store_sales"."ss_promo_sk" = "promotion"."p_promo_sk" + JOIN "store" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" + JOIN "store_returns" AS "store_returns" + ON "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" + AND "store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" + JOIN "customer" AS "customer" + ON "customer"."c_current_hdemo_sk" = "hd2"."hd_demo_sk" + AND "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" + JOIN "income_band" AS "ib1" + ON "hd1"."hd_income_band_sk" = "ib1"."ib_income_band_sk" + JOIN "customer_address" AS "ad2" + ON "customer"."c_current_addr_sk" = "ad2"."ca_address_sk" + JOIN "customer_demographics" AS "cd2" + ON "customer"."c_current_cdemo_sk" = "cd2"."cd_demo_sk" + JOIN "date_dim" AS "d2" + ON "customer"."c_first_sales_date_sk" = "d2"."d_date_sk" + JOIN "date_dim" AS "d3" + ON "customer"."c_first_shipto_date_sk" = "d3"."d_date_sk" + JOIN "customer_demographics" AS "cd1" + ON "cd1"."cd_marital_status" <> "cd2"."cd_marital_status" + AND "store_sales"."ss_cdemo_sk" = "cd1"."cd_demo_sk" GROUP BY "item"."i_product_name", "item"."i_item_sk", @@ -7919,6 +8142,107 @@ ORDER BY "cs1"."store_name", "cs2"."cnt"; +-------------------------------------- +-- TPC-DS 65 +-------------------------------------- +SELECT s_store_name, + i_item_desc, + sc.revenue, + i_current_price, + i_wholesale_cost, + i_brand +FROM store, + item, + (SELECT ss_store_sk, + Avg(revenue) AS ave + FROM (SELECT ss_store_sk, + ss_item_sk, + Sum(ss_sales_price) AS revenue + FROM store_sales, + date_dim + WHERE ss_sold_date_sk = d_date_sk + AND d_month_seq BETWEEN 1199 AND 1199 + 11 + GROUP BY ss_store_sk, + ss_item_sk) sa + GROUP BY ss_store_sk) sb, + (SELECT ss_store_sk, + ss_item_sk, + Sum(ss_sales_price) AS revenue + FROM store_sales, + date_dim + WHERE ss_sold_date_sk = d_date_sk + AND d_month_seq BETWEEN 1199 AND 1199 + 11 + GROUP BY ss_store_sk, + ss_item_sk) sc +WHERE sb.ss_store_sk = sc.ss_store_sk + AND sc.revenue <= 0.1 * sb.ave + AND s_store_sk = sc.ss_store_sk + AND i_item_sk = sc.ss_item_sk +ORDER BY s_store_name, + i_item_desc +LIMIT 100; +WITH "store_sales_2" AS ( + SELECT + "store_sales"."ss_sold_date_sk" AS "ss_sold_date_sk", + "store_sales"."ss_item_sk" AS "ss_item_sk", + "store_sales"."ss_store_sk" AS 
"ss_store_sk", + "store_sales"."ss_sales_price" AS "ss_sales_price" + FROM "store_sales" AS "store_sales" +), "date_dim_2" AS ( + SELECT + "date_dim"."d_date_sk" AS "d_date_sk", + "date_dim"."d_month_seq" AS "d_month_seq" + FROM "date_dim" AS "date_dim" + WHERE + "date_dim"."d_month_seq" <= 1210 AND "date_dim"."d_month_seq" >= 1199 +), "sc" AS ( + SELECT + "store_sales"."ss_store_sk" AS "ss_store_sk", + "store_sales"."ss_item_sk" AS "ss_item_sk", + SUM("store_sales"."ss_sales_price") AS "revenue" + FROM "store_sales_2" AS "store_sales" + JOIN "date_dim_2" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" + GROUP BY + "store_sales"."ss_store_sk", + "store_sales"."ss_item_sk" +), "sa" AS ( + SELECT + "store_sales"."ss_store_sk" AS "ss_store_sk", + SUM("store_sales"."ss_sales_price") AS "revenue" + FROM "store_sales_2" AS "store_sales" + JOIN "date_dim_2" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" + GROUP BY + "store_sales"."ss_store_sk", + "store_sales"."ss_item_sk" +), "sb" AS ( + SELECT + "sa"."ss_store_sk" AS "ss_store_sk", + AVG("sa"."revenue") AS "ave" + FROM "sa" AS "sa" + GROUP BY + "sa"."ss_store_sk" +) +SELECT + "store"."s_store_name" AS "s_store_name", + "item"."i_item_desc" AS "i_item_desc", + "sc"."revenue" AS "revenue", + "item"."i_current_price" AS "i_current_price", + "item"."i_wholesale_cost" AS "i_wholesale_cost", + "item"."i_brand" AS "i_brand" +FROM "store" AS "store" +JOIN "sc" AS "sc" + ON "store"."s_store_sk" = "sc"."ss_store_sk" +JOIN "item" AS "item" + ON "item"."i_item_sk" = "sc"."ss_item_sk" +JOIN "sb" AS "sb" + ON "sb"."ss_store_sk" = "sc"."ss_store_sk" AND "sc"."revenue" <= 0.1 * "sb"."ave" +ORDER BY + "s_store_name", + "i_item_desc" +LIMIT 100; + -------------------------------------- -- TPC-DS 66 -------------------------------------- @@ -8227,17 +8551,7 @@ GROUP BY w_warehouse_name, year1 ORDER BY w_warehouse_name LIMIT 100; -WITH "warehouse_2" AS ( - SELECT - "warehouse"."w_warehouse_sk" AS "w_warehouse_sk", - "warehouse"."w_warehouse_name" AS "w_warehouse_name", - "warehouse"."w_warehouse_sq_ft" AS "w_warehouse_sq_ft", - "warehouse"."w_city" AS "w_city", - "warehouse"."w_county" AS "w_county", - "warehouse"."w_state" AS "w_state", - "warehouse"."w_country" AS "w_country" - FROM "warehouse" AS "warehouse" -), "date_dim_2" AS ( +WITH "date_dim_2" AS ( SELECT "date_dim"."d_date_sk" AS "d_date_sk", "date_dim"."d_year" AS "d_year", @@ -8245,6 +8559,13 @@ WITH "warehouse_2" AS ( FROM "date_dim" AS "date_dim" WHERE "date_dim"."d_year" = 1998 +), "ship_mode_2" AS ( + SELECT + "ship_mode"."sm_ship_mode_sk" AS "sm_ship_mode_sk", + "ship_mode"."sm_carrier" AS "sm_carrier" + FROM "ship_mode" AS "ship_mode" + WHERE + "ship_mode"."sm_carrier" IN ('ZOUROS', 'ZHOU') ), "time_dim_2" AS ( SELECT "time_dim"."t_time_sk" AS "t_time_sk", @@ -8252,13 +8573,16 @@ WITH "warehouse_2" AS ( FROM "time_dim" AS "time_dim" WHERE "time_dim"."t_time" <= 36049 AND "time_dim"."t_time" >= 7249 -), "ship_mode_2" AS ( +), "warehouse_2" AS ( SELECT - "ship_mode"."sm_ship_mode_sk" AS "sm_ship_mode_sk", - "ship_mode"."sm_carrier" AS "sm_carrier" - FROM "ship_mode" AS "ship_mode" - WHERE - "ship_mode"."sm_carrier" IN ('ZOUROS', 'ZHOU') + "warehouse"."w_warehouse_sk" AS "w_warehouse_sk", + "warehouse"."w_warehouse_name" AS "w_warehouse_name", + "warehouse"."w_warehouse_sq_ft" AS "w_warehouse_sq_ft", + "warehouse"."w_city" AS "w_city", + "warehouse"."w_county" AS "w_county", + "warehouse"."w_state" AS "w_state", + "warehouse"."w_country" AS 
"w_country" + FROM "warehouse" AS "warehouse" ), "cte" AS ( SELECT "warehouse"."w_warehouse_name" AS "w_warehouse_name", @@ -8438,14 +8762,14 @@ WITH "warehouse_2" AS ( END ) AS "dec_net" FROM "web_sales" AS "web_sales" - JOIN "warehouse_2" AS "warehouse" - ON "web_sales"."ws_warehouse_sk" = "warehouse"."w_warehouse_sk" JOIN "date_dim_2" AS "date_dim" ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "time_dim_2" AS "time_dim" - ON "web_sales"."ws_sold_time_sk" = "time_dim"."t_time_sk" JOIN "ship_mode_2" AS "ship_mode" ON "web_sales"."ws_ship_mode_sk" = "ship_mode"."sm_ship_mode_sk" + JOIN "time_dim_2" AS "time_dim" + ON "web_sales"."ws_sold_time_sk" = "time_dim"."t_time_sk" + JOIN "warehouse_2" AS "warehouse" + ON "web_sales"."ws_warehouse_sk" = "warehouse"."w_warehouse_sk" GROUP BY "warehouse"."w_warehouse_name", "warehouse"."w_warehouse_sq_ft", @@ -8633,14 +8957,14 @@ WITH "warehouse_2" AS ( END ) AS "dec_net" FROM "catalog_sales" AS "catalog_sales" - JOIN "warehouse_2" AS "warehouse" - ON "catalog_sales"."cs_warehouse_sk" = "warehouse"."w_warehouse_sk" JOIN "date_dim_2" AS "date_dim" ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "time_dim_2" AS "time_dim" - ON "catalog_sales"."cs_sold_time_sk" = "time_dim"."t_time_sk" JOIN "ship_mode_2" AS "ship_mode" ON "catalog_sales"."cs_ship_mode_sk" = "ship_mode"."sm_ship_mode_sk" + JOIN "time_dim_2" AS "time_dim" + ON "catalog_sales"."cs_sold_time_sk" = "time_dim"."t_time_sk" + JOIN "warehouse_2" AS "warehouse" + ON "catalog_sales"."cs_warehouse_sk" = "warehouse"."w_warehouse_sk" GROUP BY "warehouse"."w_warehouse_name", "warehouse"."w_warehouse_sq_ft", @@ -8840,10 +9164,10 @@ WITH "dw1" AS ( ON "date_dim"."d_month_seq" <= 1192 AND "date_dim"."d_month_seq" >= 1181 AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "store" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "item" AS "item" ON "store_sales"."ss_item_sk" = "item"."i_item_sk" + JOIN "store" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" GROUP BY ROLLUP ( "item"."i_category", @@ -8939,12 +9263,7 @@ WHERE ss_customer_sk = c_customer_sk ORDER BY c_last_name, ss_ticket_number LIMIT 100; -WITH "customer_address_2" AS ( - SELECT - "customer_address"."ca_address_sk" AS "ca_address_sk", - "customer_address"."ca_city" AS "ca_city" - FROM "customer_address" AS "customer_address" -), "dn" AS ( +WITH "dn" AS ( SELECT "store_sales"."ss_ticket_number" AS "ss_ticket_number", "store_sales"."ss_customer_sk" AS "ss_customer_sk", @@ -8953,22 +9272,22 @@ WITH "customer_address_2" AS ( SUM("store_sales"."ss_ext_list_price") AS "list_price", SUM("store_sales"."ss_ext_tax") AS "extended_tax" FROM "store_sales" AS "store_sales" + JOIN "customer_address" AS "customer_address" + ON "store_sales"."ss_addr_sk" = "customer_address"."ca_address_sk" JOIN "date_dim" AS "date_dim" ON "date_dim"."d_dom" <= 2 AND "date_dim"."d_dom" >= 1 AND "date_dim"."d_year" IN (1998, 1999, 2000) AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "store" AS "store" - ON "store"."s_city" IN ('Fairview', 'Midway') - AND "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "household_demographics" AS "household_demographics" ON ( "household_demographics"."hd_dep_count" = 8 OR "household_demographics"."hd_vehicle_count" = 3 ) AND "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" - JOIN "customer_address_2" AS "customer_address" - ON "store_sales"."ss_addr_sk" = "customer_address"."ca_address_sk" + JOIN 
"store" AS "store" + ON "store"."s_city" IN ('Fairview', 'Midway') + AND "store_sales"."ss_store_sk" = "store"."s_store_sk" GROUP BY "store_sales"."ss_ticket_number", "store_sales"."ss_customer_sk", @@ -8985,11 +9304,11 @@ SELECT "dn"."extended_tax" AS "extended_tax", "dn"."list_price" AS "list_price" FROM "dn" AS "dn" -JOIN "customer_address_2" AS "current_addr" - ON "current_addr"."ca_city" <> "dn"."bought_city" JOIN "customer" AS "customer" - ON "customer"."c_current_addr_sk" = "current_addr"."ca_address_sk" - AND "dn"."ss_customer_sk" = "customer"."c_customer_sk" + ON "dn"."ss_customer_sk" = "customer"."c_customer_sk" +JOIN "customer_address" AS "current_addr" + ON "current_addr"."ca_city" <> "dn"."bought_city" + AND "customer"."c_current_addr_sk" = "current_addr"."ca_address_sk" ORDER BY "c_last_name", "ss_ticket_number" @@ -9111,18 +9430,18 @@ SELECT COUNT(*) AS "cnt2", "customer_demographics"."cd_credit_rating" AS "cd_credit_rating", COUNT(*) AS "cnt3" -FROM "customer" AS "customer" -JOIN "customer_address" AS "customer_address" - ON "customer"."c_current_addr_sk" = "customer_address"."ca_address_sk" - AND "customer_address"."ca_state" IN ('KS', 'AZ', 'NE') -JOIN "customer_demographics" AS "customer_demographics" - ON "customer_demographics"."cd_demo_sk" = "customer"."c_current_cdemo_sk" +FROM "customer" AS "c" LEFT JOIN "_u_0" AS "_u_0" - ON "customer"."c_customer_sk" = "_u_0"."_u_1" + ON "c"."c_customer_sk" = "_u_0"."_u_1" LEFT JOIN "_u_2" AS "_u_2" - ON "customer"."c_customer_sk" = "_u_2"."_u_3" + ON "c"."c_customer_sk" = "_u_2"."_u_3" LEFT JOIN "_u_4" AS "_u_4" - ON "customer"."c_customer_sk" = "_u_4"."_u_5" + ON "c"."c_customer_sk" = "_u_4"."_u_5" +JOIN "customer_address" AS "ca" + ON "c"."c_current_addr_sk" = "ca"."ca_address_sk" + AND "ca"."ca_state" IN ('KS', 'AZ', 'NE') +JOIN "customer_demographics" AS "customer_demographics" + ON "customer_demographics"."cd_demo_sk" = "c"."c_current_cdemo_sk" WHERE "_u_2"."_u_3" IS NULL AND "_u_4"."_u_5" IS NULL AND NOT "_u_0"."_u_1" IS NULL GROUP BY @@ -9187,22 +9506,17 @@ WITH "store_sales_2" AS ( "store_sales"."ss_store_sk" AS "ss_store_sk", "store_sales"."ss_net_profit" AS "ss_net_profit" FROM "store_sales" AS "store_sales" -), "d1" AS ( - SELECT - "date_dim"."d_date_sk" AS "d_date_sk", - "date_dim"."d_month_seq" AS "d_month_seq" - FROM "date_dim" AS "date_dim" - WHERE - "date_dim"."d_month_seq" <= 1211 AND "date_dim"."d_month_seq" >= 1200 ), "tmp1" AS ( SELECT "store"."s_state" AS "s_state", RANK() OVER (PARTITION BY "store"."s_state" ORDER BY SUM("store_sales"."ss_net_profit") DESC) AS "ranking" FROM "store_sales_2" AS "store_sales" + JOIN "date_dim" AS "date_dim" + ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk" + AND "date_dim"."d_month_seq" <= 1211 + AND "date_dim"."d_month_seq" >= 1200 JOIN "store" AS "store" ON "store"."s_store_sk" = "store_sales"."ss_store_sk" - JOIN "d1" AS "date_dim" - ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk" GROUP BY "store"."s_state" ), "_u_0" AS ( @@ -9221,8 +9535,10 @@ SELECT GROUPING("store"."s_state") + GROUPING("store"."s_county") AS "lochierarchy", RANK() OVER (PARTITION BY GROUPING("store"."s_state") + GROUPING("store"."s_county"), CASE WHEN GROUPING("store"."s_county") = 0 THEN "store"."s_state" END ORDER BY SUM("store_sales"."ss_net_profit") DESC) AS "rank_within_parent" FROM "store_sales_2" AS "store_sales" -JOIN "d1" AS "d1" +JOIN "date_dim" AS "d1" ON "d1"."d_date_sk" = "store_sales"."ss_sold_date_sk" + AND "d1"."d_month_seq" <= 1211 + AND "d1"."d_month_seq" >= 
1200 JOIN "store" AS "store" ON "store"."s_store_sk" = "store_sales"."ss_store_sk" LEFT JOIN "_u_0" AS "_u_0" @@ -9408,42 +9724,42 @@ LIMIT 100; SELECT "item"."i_item_desc" AS "i_item_desc", "warehouse"."w_warehouse_name" AS "w_warehouse_name", - "date_dim_2"."d_week_seq" AS "d_week_seq", + "d1"."d_week_seq" AS "d_week_seq", SUM(CASE WHEN "promotion"."p_promo_sk" IS NULL THEN 1 ELSE 0 END) AS "no_promo", SUM(CASE WHEN NOT "promotion"."p_promo_sk" IS NULL THEN 1 ELSE 0 END) AS "promo", COUNT(*) AS "total_cnt" FROM "catalog_sales" AS "catalog_sales" -JOIN "inventory" AS "inventory" - ON "catalog_sales"."cs_item_sk" = "inventory"."inv_item_sk" - AND "inventory"."inv_quantity_on_hand" < "catalog_sales"."cs_quantity" -JOIN "warehouse" AS "warehouse" - ON "warehouse"."w_warehouse_sk" = "inventory"."inv_warehouse_sk" -JOIN "item" AS "item" - ON "item"."i_item_sk" = "catalog_sales"."cs_item_sk" +LEFT JOIN "catalog_returns" AS "catalog_returns" + ON "catalog_returns"."cr_item_sk" = "catalog_sales"."cs_item_sk" + AND "catalog_returns"."cr_order_number" = "catalog_sales"."cs_order_number" JOIN "customer_demographics" AS "customer_demographics" ON "catalog_sales"."cs_bill_cdemo_sk" = "customer_demographics"."cd_demo_sk" AND "customer_demographics"."cd_marital_status" = 'M' +JOIN "date_dim" AS "d3" + ON "catalog_sales"."cs_ship_date_sk" = "d3"."d_date_sk" JOIN "household_demographics" AS "household_demographics" ON "catalog_sales"."cs_bill_hdemo_sk" = "household_demographics"."hd_demo_sk" AND "household_demographics"."hd_buy_potential" = '501-1000' -JOIN "date_dim" AS "date_dim" - ON "inventory"."inv_date_sk" = "date_dim"."d_date_sk" -JOIN "date_dim" AS "date_dim_2" - ON "catalog_sales"."cs_sold_date_sk" = "date_dim_2"."d_date_sk" - AND "date_dim_2"."d_week_seq" = "date_dim"."d_week_seq" - AND "date_dim_2"."d_year" = 2002 -JOIN "date_dim" AS "date_dim_3" - ON "catalog_sales"."cs_ship_date_sk" = "date_dim_3"."d_date_sk" - AND "date_dim_3"."d_date" > CONCAT("date_dim_2"."d_date", INTERVAL '5' day) +JOIN "inventory" AS "inventory" + ON "catalog_sales"."cs_item_sk" = "inventory"."inv_item_sk" + AND "inventory"."inv_quantity_on_hand" < "catalog_sales"."cs_quantity" +JOIN "item" AS "item" + ON "item"."i_item_sk" = "catalog_sales"."cs_item_sk" LEFT JOIN "promotion" AS "promotion" ON "catalog_sales"."cs_promo_sk" = "promotion"."p_promo_sk" -LEFT JOIN "catalog_returns" AS "catalog_returns" - ON "catalog_returns"."cr_item_sk" = "catalog_sales"."cs_item_sk" - AND "catalog_returns"."cr_order_number" = "catalog_sales"."cs_order_number" +JOIN "date_dim" AS "d2" + ON "inventory"."inv_date_sk" = "d2"."d_date_sk" +JOIN "warehouse" AS "warehouse" + ON "warehouse"."w_warehouse_sk" = "inventory"."inv_warehouse_sk" +JOIN "date_dim" AS "d1" + ON "catalog_sales"."cs_sold_date_sk" = "d1"."d_date_sk" + AND "d1"."d_week_seq" = "d2"."d_week_seq" + AND "d1"."d_year" = 2002 + AND "d3"."d_date" > CONCAT("d1"."d_date", INTERVAL '5' day) GROUP BY "item"."i_item_desc", "warehouse"."w_warehouse_name", - "date_dim_2"."d_week_seq" + "d1"."d_week_seq" ORDER BY "total_cnt" DESC, "i_item_desc", @@ -9503,9 +9819,6 @@ WITH "dj" AS ( AND "date_dim"."d_dom" >= 1 AND "date_dim"."d_year" IN (2000, 2001, 2002) AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "store" AS "store" - ON "store"."s_county" IN ('Williamson County', 'Williamson County', 'Williamson County', 'Williamson County') - AND "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "household_demographics" AS "household_demographics" ON ( 
"household_demographics"."hd_buy_potential" = '0-500' @@ -9518,6 +9831,9 @@ WITH "dj" AS ( THEN "household_demographics"."hd_dep_count" / "household_demographics"."hd_vehicle_count" ELSE NULL END > 1 + JOIN "store" AS "store" + ON "store"."s_county" IN ('Williamson County', 'Williamson County', 'Williamson County', 'Williamson County') + AND "store_sales"."ss_store_sk" = "store"."s_store_sk" GROUP BY "store_sales"."ss_ticket_number", "store_sales"."ss_customer_sk" @@ -9683,19 +9999,19 @@ SELECT "t_s_secyear"."customer_first_name" AS "customer_first_name", "t_s_secyear"."customer_last_name" AS "customer_last_name" FROM "year_total" AS "t_s_firstyear" -JOIN "year_total" AS "t_s_secyear" - ON "t_s_secyear"."customer_id" = "t_s_firstyear"."customer_id" - AND "t_s_secyear"."sale_type" = 's' - AND "t_s_secyear"."year1" = 2000 -JOIN "year_total" AS "t_w_secyear" - ON "t_s_firstyear"."customer_id" = "t_w_secyear"."customer_id" - AND "t_w_secyear"."sale_type" = 'w' - AND "t_w_secyear"."year1" = 2000 JOIN "year_total" AS "t_w_firstyear" ON "t_s_firstyear"."customer_id" = "t_w_firstyear"."customer_id" AND "t_w_firstyear"."sale_type" = 'w' AND "t_w_firstyear"."year1" = 1999 AND "t_w_firstyear"."year_total" > 0 +JOIN "year_total" AS "t_w_secyear" + ON "t_s_firstyear"."customer_id" = "t_w_secyear"."customer_id" + AND "t_w_secyear"."sale_type" = 'w' + AND "t_w_secyear"."year1" = 2000 +JOIN "year_total" AS "t_s_secyear" + ON "t_s_secyear"."customer_id" = "t_s_firstyear"."customer_id" + AND "t_s_secyear"."sale_type" = 's' + AND "t_s_secyear"."year1" = 2000 AND CASE WHEN "t_w_firstyear"."year_total" > 0 THEN "t_w_secyear"."year_total" / "t_w_firstyear"."year_total" @@ -9810,7 +10126,12 @@ WHERE curr_yr.i_brand_id = prev_yr.i_brand_id < 0.9 ORDER BY sales_cnt_diff LIMIT 100; -WITH "item_2" AS ( +WITH "date_dim_2" AS ( + SELECT + "date_dim"."d_date_sk" AS "d_date_sk", + "date_dim"."d_year" AS "d_year" + FROM "date_dim" AS "date_dim" +), "item_2" AS ( SELECT "item"."i_item_sk" AS "i_item_sk", "item"."i_brand_id" AS "i_brand_id", @@ -9821,11 +10142,6 @@ WITH "item_2" AS ( FROM "item" AS "item" WHERE "item"."i_category" = 'Men' -), "date_dim_2" AS ( - SELECT - "date_dim"."d_date_sk" AS "d_date_sk", - "date_dim"."d_year" AS "d_year" - FROM "date_dim" AS "date_dim" ), "cte_4" AS ( SELECT "date_dim"."d_year" AS "d_year", @@ -9836,10 +10152,10 @@ WITH "item_2" AS ( "store_sales"."ss_quantity" - COALESCE("store_returns"."sr_return_quantity", 0) AS "sales_cnt", "store_sales"."ss_ext_sales_price" - COALESCE("store_returns"."sr_return_amt", 0.0) AS "sales_amt" FROM "store_sales" AS "store_sales" - JOIN "item_2" AS "item" - ON "item"."i_item_sk" = "store_sales"."ss_item_sk" JOIN "date_dim_2" AS "date_dim" ON "date_dim"."d_date_sk" = "store_sales"."ss_sold_date_sk" + JOIN "item_2" AS "item" + ON "item"."i_item_sk" = "store_sales"."ss_item_sk" LEFT JOIN "store_returns" AS "store_returns" ON "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" AND "store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" @@ -9853,10 +10169,10 @@ WITH "item_2" AS ( "web_sales"."ws_quantity" - COALESCE("web_returns"."wr_return_quantity", 0) AS "sales_cnt", "web_sales"."ws_ext_sales_price" - COALESCE("web_returns"."wr_return_amt", 0.0) AS "sales_amt" FROM "web_sales" AS "web_sales" - JOIN "item_2" AS "item" - ON "item"."i_item_sk" = "web_sales"."ws_item_sk" JOIN "date_dim_2" AS "date_dim" ON "date_dim"."d_date_sk" = "web_sales"."ws_sold_date_sk" + JOIN "item_2" AS "item" + ON "item"."i_item_sk" = "web_sales"."ws_item_sk" LEFT 
JOIN "web_returns" AS "web_returns" ON "web_sales"."ws_item_sk" = "web_returns"."wr_item_sk" AND "web_sales"."ws_order_number" = "web_returns"."wr_order_number" @@ -9870,13 +10186,13 @@ WITH "item_2" AS ( "catalog_sales"."cs_quantity" - COALESCE("catalog_returns"."cr_return_quantity", 0) AS "sales_cnt", "catalog_sales"."cs_ext_sales_price" - COALESCE("catalog_returns"."cr_return_amount", 0.0) AS "sales_amt" FROM "catalog_sales" AS "catalog_sales" - JOIN "item_2" AS "item" - ON "item"."i_item_sk" = "catalog_sales"."cs_item_sk" - JOIN "date_dim_2" AS "date_dim" - ON "date_dim"."d_date_sk" = "catalog_sales"."cs_sold_date_sk" LEFT JOIN "catalog_returns" AS "catalog_returns" ON "catalog_sales"."cs_item_sk" = "catalog_returns"."cr_item_sk" AND "catalog_sales"."cs_order_number" = "catalog_returns"."cr_order_number" + JOIN "date_dim_2" AS "date_dim" + ON "date_dim"."d_date_sk" = "catalog_sales"."cs_sold_date_sk" + JOIN "item_2" AS "item" + ON "item"."i_item_sk" = "catalog_sales"."cs_item_sk" UNION SELECT "cte_4"."d_year" AS "d_year", @@ -9988,17 +10304,17 @@ ORDER BY channel, d_qoy, i_category LIMIT 100; -WITH "item_2" AS ( - SELECT - "item"."i_item_sk" AS "i_item_sk", - "item"."i_category" AS "i_category" - FROM "item" AS "item" -), "date_dim_2" AS ( +WITH "date_dim_2" AS ( SELECT "date_dim"."d_date_sk" AS "d_date_sk", "date_dim"."d_year" AS "d_year", "date_dim"."d_qoy" AS "d_qoy" FROM "date_dim" AS "date_dim" +), "item_2" AS ( + SELECT + "item"."i_item_sk" AS "i_item_sk", + "item"."i_category" AS "i_category" + FROM "item" AS "item" ), "cte_4" AS ( SELECT 'web' AS "channel", @@ -10008,10 +10324,10 @@ WITH "item_2" AS ( "item"."i_category" AS "i_category", "web_sales"."ws_ext_sales_price" AS "ext_sales_price" FROM "web_sales" AS "web_sales" - JOIN "item_2" AS "item" - ON "web_sales"."ws_item_sk" = "item"."i_item_sk" JOIN "date_dim_2" AS "date_dim" ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "web_sales"."ws_item_sk" = "item"."i_item_sk" WHERE "web_sales"."ws_ship_hdemo_sk" IS NULL UNION ALL @@ -10023,10 +10339,10 @@ WITH "item_2" AS ( "item"."i_category" AS "i_category", "catalog_sales"."cs_ext_sales_price" AS "ext_sales_price" FROM "catalog_sales" AS "catalog_sales" - JOIN "item_2" AS "item" - ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" JOIN "date_dim_2" AS "date_dim" ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" WHERE "catalog_sales"."cs_warehouse_sk" IS NULL ), "foo" AS ( @@ -10038,10 +10354,10 @@ WITH "item_2" AS ( "item"."i_category" AS "i_category", "store_sales"."ss_ext_sales_price" AS "ext_sales_price" FROM "store_sales" AS "store_sales" - JOIN "item_2" AS "item" - ON "store_sales"."ss_item_sk" = "item"."i_item_sk" JOIN "date_dim_2" AS "date_dim" ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "store_sales"."ss_item_sk" = "item"."i_item_sk" WHERE "store_sales"."ss_hdemo_sk" IS NULL UNION ALL @@ -10426,11 +10742,11 @@ WITH "date_dim_2" AS ( SUM("web_sales"."ws_wholesale_cost") AS "ws_wc", SUM("web_sales"."ws_sales_price") AS "ws_sp" FROM "web_sales" AS "web_sales" + JOIN "date_dim_2" AS "date_dim" + ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" LEFT JOIN "web_returns" AS "web_returns" ON "web_returns"."wr_order_number" = "web_sales"."ws_order_number" AND "web_sales"."ws_item_sk" = "web_returns"."wr_item_sk" - JOIN "date_dim_2" AS "date_dim" - ON "web_sales"."ws_sold_date_sk" = 
"date_dim"."d_date_sk" WHERE "web_returns"."wr_order_number" IS NULL GROUP BY @@ -10466,11 +10782,11 @@ WITH "date_dim_2" AS ( SUM("store_sales"."ss_wholesale_cost") AS "ss_wc", SUM("store_sales"."ss_sales_price") AS "ss_sp" FROM "store_sales" AS "store_sales" + JOIN "date_dim_2" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" LEFT JOIN "store_returns" AS "store_returns" ON "store_returns"."sr_ticket_number" = "store_sales"."ss_ticket_number" AND "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" - JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" WHERE "store_returns"."sr_ticket_number" IS NULL GROUP BY @@ -10488,14 +10804,14 @@ SELECT COALESCE("ws"."ws_wc", 0) + COALESCE("cs"."cs_wc", 0) AS "other_chan_wholesale_cost", COALESCE("ws"."ws_sp", 0) + COALESCE("cs"."cs_sp", 0) AS "other_chan_sales_price" FROM "ss" -LEFT JOIN "ws" - ON "ws"."ws_customer_sk" = "ss"."ss_customer_sk" - AND "ws"."ws_item_sk" = "ss"."ss_item_sk" - AND "ws"."ws_sold_year" = "ss"."ss_sold_year" LEFT JOIN "cs" ON "cs"."cs_customer_sk" = "ss"."ss_customer_sk" AND "cs"."cs_item_sk" = "cs"."cs_item_sk" AND "cs"."cs_sold_year" = "ss"."ss_sold_year" +LEFT JOIN "ws" + ON "ws"."ws_customer_sk" = "ss"."ss_customer_sk" + AND "ws"."ws_item_sk" = "ss"."ss_item_sk" + AND "ws"."ws_sold_year" = "ss"."ss_sold_year" WHERE "ss"."ss_sold_year" = 1999 AND COALESCE("cs"."cs_qty", 0) > 0 @@ -10557,19 +10873,19 @@ WITH "ms" AS ( SUM("store_sales"."ss_net_profit") AS "profit" FROM "store_sales" AS "store_sales" JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_dow" = 1 - AND "date_dim"."d_year" IN (2000, 2001, 2002) - AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "store" AS "store" - ON "store"."s_number_employees" <= 295 - AND "store"."s_number_employees" >= 200 - AND "store_sales"."ss_store_sk" = "store"."s_store_sk" + ON "date_dim"."d_dow" = 1 + AND "date_dim"."d_year" IN (2000, 2001, 2002) + AND "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "household_demographics" AS "household_demographics" ON ( "household_demographics"."hd_dep_count" = 8 OR "household_demographics"."hd_vehicle_count" > 4 ) AND "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" + JOIN "store" AS "store" + ON "store"."s_number_employees" <= 295 + AND "store"."s_number_employees" >= 200 + AND "store_sales"."ss_store_sk" = "store"."s_store_sk" GROUP BY "store_sales"."ss_ticket_number", "store_sales"."ss_customer_sk", @@ -10729,17 +11045,17 @@ WITH "date_dim_2" AS ( SUM(COALESCE("store_returns"."sr_return_amt", 0)) AS "returns1", SUM("store_sales"."ss_net_profit" - COALESCE("store_returns"."sr_net_loss", 0)) AS "profit" FROM "store_sales" AS "store_sales" - LEFT JOIN "store_returns" AS "store_returns" - ON "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" - AND "store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" JOIN "date_dim_2" AS "date_dim" ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "store" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "item_2" AS "item" ON "store_sales"."ss_item_sk" = "item"."i_item_sk" JOIN "promotion_2" AS "promotion" ON "store_sales"."ss_promo_sk" = "promotion"."p_promo_sk" + JOIN "store" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" + LEFT JOIN "store_returns" AS "store_returns" + ON "store_sales"."ss_item_sk" = "store_returns"."sr_item_sk" + AND "store_sales"."ss_ticket_number" = "store_returns"."sr_ticket_number" GROUP 
BY "store"."s_store_id" ), "csr" AS ( @@ -10749,13 +11065,13 @@ WITH "date_dim_2" AS ( SUM(COALESCE("catalog_returns"."cr_return_amount", 0)) AS "returns1", SUM("catalog_sales"."cs_net_profit" - COALESCE("catalog_returns"."cr_net_loss", 0)) AS "profit" FROM "catalog_sales" AS "catalog_sales" + JOIN "catalog_page" AS "catalog_page" + ON "catalog_sales"."cs_catalog_page_sk" = "catalog_page"."cp_catalog_page_sk" LEFT JOIN "catalog_returns" AS "catalog_returns" ON "catalog_sales"."cs_item_sk" = "catalog_returns"."cr_item_sk" AND "catalog_sales"."cs_order_number" = "catalog_returns"."cr_order_number" JOIN "date_dim_2" AS "date_dim" ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "catalog_page" AS "catalog_page" - ON "catalog_sales"."cs_catalog_page_sk" = "catalog_page"."cp_catalog_page_sk" JOIN "item_2" AS "item" ON "catalog_sales"."cs_item_sk" = "item"."i_item_sk" JOIN "promotion_2" AS "promotion" @@ -10769,17 +11085,17 @@ WITH "date_dim_2" AS ( SUM(COALESCE("web_returns"."wr_return_amt", 0)) AS "returns1", SUM("web_sales"."ws_net_profit" - COALESCE("web_returns"."wr_net_loss", 0)) AS "profit" FROM "web_sales" AS "web_sales" - LEFT JOIN "web_returns" AS "web_returns" - ON "web_sales"."ws_item_sk" = "web_returns"."wr_item_sk" - AND "web_sales"."ws_order_number" = "web_returns"."wr_order_number" JOIN "date_dim_2" AS "date_dim" ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" - JOIN "web_site" AS "web_site" - ON "web_sales"."ws_web_site_sk" = "web_site"."web_site_sk" JOIN "item_2" AS "item" ON "web_sales"."ws_item_sk" = "item"."i_item_sk" JOIN "promotion_2" AS "promotion" ON "web_sales"."ws_promo_sk" = "promotion"."p_promo_sk" + LEFT JOIN "web_returns" AS "web_returns" + ON "web_sales"."ws_item_sk" = "web_returns"."wr_item_sk" + AND "web_sales"."ws_order_number" = "web_returns"."wr_order_number" + JOIN "web_site" AS "web_site" + ON "web_sales"."ws_web_site_sk" = "web_site"."web_site_sk" GROUP BY "web_site"."web_site_id" ), "cte_4" AS ( @@ -10896,11 +11212,11 @@ WITH "customer_total_return" AS ( "customer_address"."ca_state" AS "ctr_state", SUM("catalog_returns"."cr_return_amt_inc_tax") AS "ctr_total_return" FROM "catalog_returns" AS "catalog_returns" + JOIN "customer_address" AS "customer_address" + ON "catalog_returns"."cr_returning_addr_sk" = "customer_address"."ca_address_sk" JOIN "date_dim" AS "date_dim" ON "catalog_returns"."cr_returned_date_sk" = "date_dim"."d_date_sk" AND "date_dim"."d_year" = 1999 - JOIN "customer_address" AS "customer_address" - ON "catalog_returns"."cr_returning_addr_sk" = "customer_address"."ca_address_sk" GROUP BY "catalog_returns"."cr_returning_customer_sk", "customer_address"."ca_state" @@ -10930,13 +11246,13 @@ SELECT "customer_address"."ca_location_type" AS "ca_location_type", "ctr1"."ctr_total_return" AS "ctr_total_return" FROM "customer_total_return" AS "ctr1" +LEFT JOIN "_u_0" AS "_u_0" + ON "ctr1"."ctr_state" = "_u_0"."_u_1" JOIN "customer" AS "customer" ON "ctr1"."ctr_customer_sk" = "customer"."c_customer_sk" JOIN "customer_address" AS "customer_address" ON "customer_address"."ca_address_sk" = "customer"."c_current_addr_sk" AND "customer_address"."ca_state" = 'TX' -LEFT JOIN "_u_0" AS "_u_0" - ON "ctr1"."ctr_state" = "_u_0"."_u_1" WHERE "ctr1"."ctr_total_return" > "_u_0"."_col_0" ORDER BY @@ -10992,12 +11308,12 @@ JOIN "inventory" AS "inventory" ON "inventory"."inv_item_sk" = "item"."i_item_sk" AND "inventory"."inv_quantity_on_hand" <= 500 AND "inventory"."inv_quantity_on_hand" >= 100 +JOIN "store_sales" AS "store_sales" + ON 
"store_sales"."ss_item_sk" = "item"."i_item_sk" JOIN "date_dim" AS "date_dim" ON "date_dim"."d_date_sk" = "inventory"."inv_date_sk" AND CAST("date_dim"."d_date" AS DATE) <= CAST('1998-06-26' AS DATE) AND CAST("date_dim"."d_date" AS DATE) >= CAST('1998-04-27' AS DATE) -JOIN "store_sales" AS "store_sales" - ON "store_sales"."ss_item_sk" = "item"."i_item_sk" WHERE "item"."i_current_price" <= 93 AND "item"."i_current_price" >= 63 @@ -11087,16 +11403,16 @@ WHERE sr_items.item_id = cr_items.item_id ORDER BY sr_items.item_id, sr_item_qty LIMIT 100; -WITH "item_2" AS ( - SELECT - "item"."i_item_sk" AS "i_item_sk", - "item"."i_item_id" AS "i_item_id" - FROM "item" AS "item" -), "date_dim_2" AS ( +WITH "date_dim_2" AS ( SELECT "date_dim"."d_date_sk" AS "d_date_sk", "date_dim"."d_date" AS "d_date" FROM "date_dim" AS "date_dim" +), "item_2" AS ( + SELECT + "item"."i_item_sk" AS "i_item_sk", + "item"."i_item_id" AS "i_item_id" + FROM "item" AS "item" ), "_u_0" AS ( SELECT "date_dim"."d_week_seq" AS "d_week_seq" @@ -11120,10 +11436,10 @@ WITH "item_2" AS ( "item"."i_item_id" AS "item_id", SUM("store_returns"."sr_return_quantity") AS "sr_item_qty" FROM "store_returns" AS "store_returns" - JOIN "item_2" AS "item" - ON "store_returns"."sr_item_sk" = "item"."i_item_sk" JOIN "date_dim_2" AS "date_dim" ON "store_returns"."sr_returned_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "store_returns"."sr_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_1" AS "_u_1" ON "date_dim"."d_date" = "_u_1"."d_date" WHERE @@ -11145,10 +11461,10 @@ WITH "item_2" AS ( "item"."i_item_id" AS "item_id", SUM("catalog_returns"."cr_return_quantity") AS "cr_item_qty" FROM "catalog_returns" AS "catalog_returns" - JOIN "item_2" AS "item" - ON "catalog_returns"."cr_item_sk" = "item"."i_item_sk" JOIN "date_dim_2" AS "date_dim" ON "catalog_returns"."cr_returned_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "catalog_returns"."cr_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_3" AS "_u_3" ON "date_dim"."d_date" = "_u_3"."d_date" WHERE @@ -11170,10 +11486,10 @@ WITH "item_2" AS ( "item"."i_item_id" AS "item_id", SUM("web_returns"."wr_return_quantity") AS "wr_item_qty" FROM "web_returns" AS "web_returns" - JOIN "item_2" AS "item" - ON "web_returns"."wr_item_sk" = "item"."i_item_sk" JOIN "date_dim_2" AS "date_dim" ON "web_returns"."wr_returned_date_sk" = "date_dim"."d_date_sk" + JOIN "item_2" AS "item" + ON "web_returns"."wr_item_sk" = "item"."i_item_sk" LEFT JOIN "_u_5" AS "_u_5" ON "date_dim"."d_date" = "_u_5"."d_date" WHERE @@ -11306,27 +11622,46 @@ ORDER BY Substr(r_reason_desc, 1, 20), Avg(wr_refunded_cash), Avg(wr_fee) LIMIT 100; -WITH "cd2" AS ( - SELECT - "customer_demographics"."cd_demo_sk" AS "cd_demo_sk", - "customer_demographics"."cd_marital_status" AS "cd_marital_status", - "customer_demographics"."cd_education_status" AS "cd_education_status" - FROM "customer_demographics" AS "customer_demographics" -) SELECT SUBSTR("reason"."r_reason_desc", 1, 20) AS "_col_0", AVG("web_sales"."ws_quantity") AS "_col_1", AVG("web_returns"."wr_refunded_cash") AS "_col_2", AVG("web_returns"."wr_fee") AS "_col_3" FROM "web_sales" AS "web_sales" +JOIN "date_dim" AS "date_dim" + ON "date_dim"."d_year" = 2001 AND "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" +JOIN "web_page" AS "web_page" + ON "web_sales"."ws_web_page_sk" = "web_page"."wp_web_page_sk" JOIN "web_returns" AS "web_returns" ON "web_sales"."ws_item_sk" = "web_returns"."wr_item_sk" AND "web_sales"."ws_order_number" = "web_returns"."wr_order_number" -JOIN 
"web_page" AS "web_page" - ON "web_sales"."ws_web_page_sk" = "web_page"."wp_web_page_sk" -JOIN "cd2" AS "cd2" +JOIN "customer_demographics" AS "cd2" ON "cd2"."cd_demo_sk" = "web_returns"."wr_returning_cdemo_sk" -JOIN "cd2" AS "cd1" +JOIN "customer_address" AS "customer_address" + ON "customer_address"."ca_address_sk" = "web_returns"."wr_refunded_addr_sk" + AND ( + ( + "customer_address"."ca_country" = 'United States' + AND "customer_address"."ca_state" IN ('FL', 'WI', 'KS') + AND "web_sales"."ws_net_profit" <= 250 + AND "web_sales"."ws_net_profit" >= 50 + ) + OR ( + "customer_address"."ca_country" = 'United States' + AND "customer_address"."ca_state" IN ('KY', 'ME', 'IL') + AND "web_sales"."ws_net_profit" <= 200 + AND "web_sales"."ws_net_profit" >= 100 + ) + OR ( + "customer_address"."ca_country" = 'United States' + AND "customer_address"."ca_state" IN ('OK', 'NE', 'MN') + AND "web_sales"."ws_net_profit" <= 300 + AND "web_sales"."ws_net_profit" >= 150 + ) + ) +JOIN "reason" AS "reason" + ON "reason"."r_reason_sk" = "web_returns"."wr_reason_sk" +JOIN "customer_demographics" AS "cd1" ON "cd1"."cd_demo_sk" = "web_returns"."wr_refunded_cdemo_sk" AND ( ( @@ -11354,32 +11689,6 @@ JOIN "cd2" AS "cd1" AND "web_sales"."ws_sales_price" >= 50.00 ) ) -JOIN "customer_address" AS "customer_address" - ON "customer_address"."ca_address_sk" = "web_returns"."wr_refunded_addr_sk" - AND ( - ( - "customer_address"."ca_country" = 'United States' - AND "customer_address"."ca_state" IN ('FL', 'WI', 'KS') - AND "web_sales"."ws_net_profit" <= 250 - AND "web_sales"."ws_net_profit" >= 50 - ) - OR ( - "customer_address"."ca_country" = 'United States' - AND "customer_address"."ca_state" IN ('KY', 'ME', 'IL') - AND "web_sales"."ws_net_profit" <= 200 - AND "web_sales"."ws_net_profit" >= 100 - ) - OR ( - "customer_address"."ca_country" = 'United States' - AND "customer_address"."ca_state" IN ('OK', 'NE', 'MN') - AND "web_sales"."ws_net_profit" <= 300 - AND "web_sales"."ws_net_profit" >= 150 - ) - ) -JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_year" = 2001 AND "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" -JOIN "reason" AS "reason" - ON "reason"."r_reason_sk" = "web_returns"."wr_reason_sk" GROUP BY "reason"."r_reason_desc" ORDER BY @@ -11422,10 +11731,10 @@ SELECT GROUPING("item"."i_category") + GROUPING("item"."i_class") AS "lochierarchy", RANK() OVER (PARTITION BY GROUPING("item"."i_category") + GROUPING("item"."i_class"), CASE WHEN GROUPING("item"."i_class") = 0 THEN "item"."i_category" END ORDER BY SUM("web_sales"."ws_net_paid") DESC) AS "rank_within_parent" FROM "web_sales" AS "web_sales" -JOIN "date_dim" AS "date_dim" - ON "date_dim"."d_date_sk" = "web_sales"."ws_sold_date_sk" - AND "date_dim"."d_month_seq" <= 1194 - AND "date_dim"."d_month_seq" >= 1183 +JOIN "date_dim" AS "d1" + ON "d1"."d_date_sk" = "web_sales"."ws_sold_date_sk" + AND "d1"."d_month_seq" <= 1194 + AND "d1"."d_month_seq" >= 1183 JOIN "item" AS "item" ON "item"."i_item_sk" = "web_sales"."ws_item_sk" GROUP BY @@ -11462,7 +11771,13 @@ from ((select distinct c_last_name, c_first_name, d_date and d_month_seq between 1188 and 1188+11) ) cool_cust ; -WITH "date_dim_2" AS ( +WITH "customer_2" AS ( + SELECT + "customer"."c_customer_sk" AS "c_customer_sk", + "customer"."c_first_name" AS "c_first_name", + "customer"."c_last_name" AS "c_last_name" + FROM "customer" AS "customer" +), "date_dim_2" AS ( SELECT "date_dim"."d_date_sk" AS "d_date_sk", "date_dim"."d_date" AS "d_date", @@ -11470,42 +11785,36 @@ WITH "date_dim_2" AS ( FROM "date_dim" AS 
"date_dim" WHERE "date_dim"."d_month_seq" <= 1199 AND "date_dim"."d_month_seq" >= 1188 -), "customer_2" AS ( - SELECT - "customer"."c_customer_sk" AS "c_customer_sk", - "customer"."c_first_name" AS "c_first_name", - "customer"."c_last_name" AS "c_last_name" - FROM "customer" AS "customer" ), "cte" AS ( SELECT DISTINCT "customer"."c_last_name" AS "c_last_name", "customer"."c_first_name" AS "c_first_name", "date_dim"."d_date" AS "d_date" FROM "store_sales" AS "store_sales" - JOIN "date_dim_2" AS "date_dim" - ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_2" AS "customer" ON "store_sales"."ss_customer_sk" = "customer"."c_customer_sk" + JOIN "date_dim_2" AS "date_dim" + ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" ), "cte_2" AS ( SELECT DISTINCT "customer"."c_last_name" AS "c_last_name", "customer"."c_first_name" AS "c_first_name", "date_dim"."d_date" AS "d_date" FROM "catalog_sales" AS "catalog_sales" - JOIN "date_dim_2" AS "date_dim" - ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_2" AS "customer" ON "catalog_sales"."cs_bill_customer_sk" = "customer"."c_customer_sk" + JOIN "date_dim_2" AS "date_dim" + ON "catalog_sales"."cs_sold_date_sk" = "date_dim"."d_date_sk" ), "cte_3" AS ( SELECT DISTINCT "customer"."c_last_name" AS "c_last_name", "customer"."c_first_name" AS "c_first_name", "date_dim"."d_date" AS "d_date" FROM "web_sales" AS "web_sales" - JOIN "date_dim_2" AS "date_dim" - ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" JOIN "customer_2" AS "customer" ON "web_sales"."ws_bill_customer_sk" = "customer"."c_customer_sk" + JOIN "date_dim_2" AS "date_dim" + ON "web_sales"."ws_sold_date_sk" = "date_dim"."d_date_sk" ), "cte_4" AS ( ( SELECT @@ -11676,96 +11985,96 @@ WITH "store_sales_2" AS ( FROM "store_sales_2" AS "store_sales" JOIN "household_demographics_2" AS "household_demographics" ON "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" + JOIN "store_2" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "time_dim" AS "time_dim" ON "store_sales"."ss_sold_time_sk" = "time_dim"."t_time_sk" AND "time_dim"."t_hour" = 8 AND "time_dim"."t_minute" >= 30 - JOIN "store_2" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" ), "s2" AS ( SELECT COUNT(*) AS "h9_to_9_30" FROM "store_sales_2" AS "store_sales" JOIN "household_demographics_2" AS "household_demographics" ON "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" + JOIN "store_2" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "time_dim" AS "time_dim" ON "store_sales"."ss_sold_time_sk" = "time_dim"."t_time_sk" AND "time_dim"."t_hour" = 9 AND "time_dim"."t_minute" < 30 - JOIN "store_2" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" ), "s3" AS ( SELECT COUNT(*) AS "h9_30_to_10" FROM "store_sales_2" AS "store_sales" JOIN "household_demographics_2" AS "household_demographics" ON "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" + JOIN "store_2" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "time_dim" AS "time_dim" ON "store_sales"."ss_sold_time_sk" = "time_dim"."t_time_sk" AND "time_dim"."t_hour" = 9 AND "time_dim"."t_minute" >= 30 - JOIN "store_2" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" ), "s4" AS ( SELECT COUNT(*) AS "h10_to_10_30" FROM "store_sales_2" AS "store_sales" JOIN "household_demographics_2" AS "household_demographics" ON "store_sales"."ss_hdemo_sk" = 
"household_demographics"."hd_demo_sk" + JOIN "store_2" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "time_dim" AS "time_dim" ON "store_sales"."ss_sold_time_sk" = "time_dim"."t_time_sk" AND "time_dim"."t_hour" = 10 AND "time_dim"."t_minute" < 30 - JOIN "store_2" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" ), "s5" AS ( SELECT COUNT(*) AS "h10_30_to_11" FROM "store_sales_2" AS "store_sales" JOIN "household_demographics_2" AS "household_demographics" ON "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" + JOIN "store_2" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "time_dim" AS "time_dim" ON "store_sales"."ss_sold_time_sk" = "time_dim"."t_time_sk" AND "time_dim"."t_hour" = 10 AND "time_dim"."t_minute" >= 30 - JOIN "store_2" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" ), "s6" AS ( SELECT COUNT(*) AS "h11_to_11_30" FROM "store_sales_2" AS "store_sales" JOIN "household_demographics_2" AS "household_demographics" ON "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" + JOIN "store_2" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "time_dim" AS "time_dim" ON "store_sales"."ss_sold_time_sk" = "time_dim"."t_time_sk" AND "time_dim"."t_hour" = 11 AND "time_dim"."t_minute" < 30 - JOIN "store_2" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" ), "s7" AS ( SELECT COUNT(*) AS "h11_30_to_12" FROM "store_sales_2" AS "store_sales" JOIN "household_demographics_2" AS "household_demographics" ON "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" + JOIN "store_2" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "time_dim" AS "time_dim" ON "store_sales"."ss_sold_time_sk" = "time_dim"."t_time_sk" AND "time_dim"."t_hour" = 11 AND "time_dim"."t_minute" >= 30 - JOIN "store_2" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" ), "s8" AS ( SELECT COUNT(*) AS "h12_to_12_30" FROM "store_sales_2" AS "store_sales" JOIN "household_demographics_2" AS "household_demographics" ON "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" + JOIN "store_2" AS "store" + ON "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "time_dim" AS "time_dim" ON "store_sales"."ss_sold_time_sk" = "time_dim"."t_time_sk" AND "time_dim"."t_hour" = 12 AND "time_dim"."t_minute" < 30 - JOIN "store_2" AS "store" - ON "store_sales"."ss_store_sk" = "store"."s_store_sk" ) SELECT "s1"."h8_30_to_9" AS "h8_30_to_9", @@ -12016,16 +12325,19 @@ SELECT "call_center"."cc_manager" AS "manager", SUM("catalog_returns"."cr_net_loss") AS "returns_loss" FROM "call_center" AS "call_center" -JOIN "catalog_returns" AS "catalog_returns" - ON "catalog_returns"."cr_call_center_sk" = "call_center"."cc_call_center_sk" -JOIN "date_dim" AS "date_dim" - ON "catalog_returns"."cr_returned_date_sk" = "date_dim"."d_date_sk" - AND "date_dim"."d_moy" = 12 - AND "date_dim"."d_year" = 1999 JOIN "household_demographics" AS "household_demographics" ON "household_demographics"."hd_buy_potential" LIKE 'Unknown%' +JOIN "customer" AS "customer" + ON "household_demographics"."hd_demo_sk" = "customer"."c_current_hdemo_sk" +JOIN "catalog_returns" AS "catalog_returns" + ON "catalog_returns"."cr_call_center_sk" = "call_center"."cc_call_center_sk" + AND "catalog_returns"."cr_returning_customer_sk" = "customer"."c_customer_sk" +JOIN "customer_address" AS "customer_address" + ON "customer_address"."ca_address_sk" = "customer"."c_current_addr_sk" + AND 
"customer_address"."ca_gmt_offset" = -7 JOIN "customer_demographics" AS "customer_demographics" - ON ( + ON "customer_demographics"."cd_demo_sk" = "customer"."c_current_cdemo_sk" + AND ( "customer_demographics"."cd_education_status" = 'Advanced Degree' OR "customer_demographics"."cd_education_status" = 'Unknown' ) @@ -12041,13 +12353,10 @@ JOIN "customer_demographics" AS "customer_demographics" "customer_demographics"."cd_marital_status" = 'M' OR "customer_demographics"."cd_marital_status" = 'W' ) -JOIN "customer_address" AS "customer_address" - ON "customer_address"."ca_gmt_offset" = -7 -JOIN "customer" AS "customer" - ON "catalog_returns"."cr_returning_customer_sk" = "customer"."c_customer_sk" - AND "customer_address"."ca_address_sk" = "customer"."c_current_addr_sk" - AND "customer_demographics"."cd_demo_sk" = "customer"."c_current_cdemo_sk" - AND "household_demographics"."hd_demo_sk" = "customer"."c_current_hdemo_sk" +JOIN "date_dim" AS "date_dim" + ON "catalog_returns"."cr_returned_date_sk" = "date_dim"."d_date_sk" + AND "date_dim"."d_moy" = 12 + AND "date_dim"."d_year" = 1999 GROUP BY "call_center"."cc_call_center_id", "call_center"."cc_name", @@ -12108,10 +12417,10 @@ WITH "web_sales_2" AS ( SELECT SUM("web_sales"."ws_ext_discount_amt") AS "Excess Discount Amount" FROM "web_sales_2" AS "web_sales" -JOIN "item" AS "item" - ON "item"."i_item_sk" = "web_sales"."ws_item_sk" AND "item"."i_manufact_id" = 718 JOIN "date_dim_2" AS "date_dim" ON "date_dim"."d_date_sk" = "web_sales"."ws_sold_date_sk" +JOIN "item" AS "item" + ON "item"."i_item_sk" = "web_sales"."ws_item_sk" AND "item"."i_manufact_id" = 718 LEFT JOIN "_u_0" AS "_u_0" ON "_u_0"."_u_1" = "item"."i_item_sk" WHERE @@ -12158,11 +12467,11 @@ SELECT END ) AS "sumsales" FROM "store_sales" AS "store_sales" +JOIN "reason" AS "reason" + ON "reason"."r_reason_desc" = 'reason 38' LEFT JOIN "store_returns" AS "store_returns" ON "store_returns"."sr_item_sk" = "store_sales"."ss_item_sk" AND "store_returns"."sr_ticket_number" = "store_sales"."ss_ticket_number" -JOIN "reason" AS "reason" - ON "reason"."r_reason_desc" = 'reason 38' WHERE "store_returns"."sr_reason_sk" = "reason"."r_reason_sk" GROUP BY @@ -12218,32 +12527,32 @@ WITH "_u_0" AS ( "wr1"."wr_order_number" ) SELECT - COUNT(DISTINCT "web_sales"."ws_order_number") AS "order count", - SUM("web_sales"."ws_ext_ship_cost") AS "total shipping cost", - SUM("web_sales"."ws_net_profit") AS "total net profit" -FROM "web_sales" AS "web_sales" + COUNT(DISTINCT "ws1"."ws_order_number") AS "order count", + SUM("ws1"."ws_ext_ship_cost") AS "total shipping cost", + SUM("ws1"."ws_net_profit") AS "total net profit" +FROM "web_sales" AS "ws1" +LEFT JOIN "_u_0" AS "_u_0" + ON "ws1"."ws_order_number" = "_u_0"."_u_1" +LEFT JOIN "_u_3" AS "_u_3" + ON "ws1"."ws_order_number" = "_u_3"."_u_4" +JOIN "customer_address" AS "customer_address" + ON "customer_address"."ca_state" = 'MT' + AND "ws1"."ws_ship_addr_sk" = "customer_address"."ca_address_sk" JOIN "date_dim" AS "date_dim" ON "date_dim"."d_date" >= '2000-3-01' - AND "web_sales"."ws_ship_date_sk" = "date_dim"."d_date_sk" + AND "ws1"."ws_ship_date_sk" = "date_dim"."d_date_sk" AND CAST("date_dim"."d_date" AS DATE) <= ( CAST('2000-3-01' AS DATE) + INTERVAL '60' day ) -JOIN "customer_address" AS "customer_address" - ON "customer_address"."ca_state" = 'MT' - AND "web_sales"."ws_ship_addr_sk" = "customer_address"."ca_address_sk" JOIN "web_site" AS "web_site" - ON "web_sales"."ws_web_site_sk" = "web_site"."web_site_sk" - AND "web_site"."web_company_name" = 'pri' -LEFT 
JOIN "_u_0" AS "_u_0" - ON "web_sales"."ws_order_number" = "_u_0"."_u_1" -LEFT JOIN "_u_3" AS "_u_3" - ON "web_sales"."ws_order_number" = "_u_3"."_u_4" + ON "web_site"."web_company_name" = 'pri' + AND "ws1"."ws_web_site_sk" = "web_site"."web_site_sk" WHERE "_u_3"."_u_4" IS NULL - AND ARRAY_ANY("_u_0"."_u_2", "_x" -> "web_sales"."ws_warehouse_sk" <> "_x") + AND ARRAY_ANY("_u_0"."_u_2", "_x" -> "ws1"."ws_warehouse_sk" <> "_x") AND NOT "_u_0"."_u_1" IS NULL ORDER BY - COUNT(DISTINCT "web_sales"."ws_order_number") + COUNT(DISTINCT "ws1"."ws_order_number") LIMIT 100; -------------------------------------- @@ -12285,16 +12594,11 @@ AND ws1.ws_order_number IN WHERE wr_order_number = ws_wh.ws_order_number) ORDER BY count(DISTINCT ws_order_number) LIMIT 100; -WITH "ws1" AS ( - SELECT - "web_sales"."ws_warehouse_sk" AS "ws_warehouse_sk", - "web_sales"."ws_order_number" AS "ws_order_number" - FROM "web_sales" AS "web_sales" -), "ws_wh" AS ( +WITH "ws_wh" AS ( SELECT "ws1"."ws_order_number" AS "ws_order_number" - FROM "ws1" AS "ws1" - JOIN "ws1" AS "ws2" + FROM "web_sales" AS "ws1" + JOIN "web_sales" AS "ws2" ON "ws1"."ws_order_number" = "ws2"."ws_order_number" AND "ws1"."ws_warehouse_sk" <> "ws2"."ws_warehouse_sk" ), "_u_0" AS ( @@ -12313,30 +12617,30 @@ WITH "ws1" AS ( "web_returns"."wr_order_number" ) SELECT - COUNT(DISTINCT "web_sales"."ws_order_number") AS "order count", - SUM("web_sales"."ws_ext_ship_cost") AS "total shipping cost", - SUM("web_sales"."ws_net_profit") AS "total net profit" -FROM "web_sales" AS "web_sales" + COUNT(DISTINCT "ws1"."ws_order_number") AS "order count", + SUM("ws1"."ws_ext_ship_cost") AS "total shipping cost", + SUM("ws1"."ws_net_profit") AS "total net profit" +FROM "web_sales" AS "ws1" +LEFT JOIN "_u_0" AS "_u_0" + ON "ws1"."ws_order_number" = "_u_0"."ws_order_number" +LEFT JOIN "_u_1" AS "_u_1" + ON "ws1"."ws_order_number" = "_u_1"."wr_order_number" +JOIN "customer_address" AS "customer_address" + ON "customer_address"."ca_state" = 'IN' + AND "ws1"."ws_ship_addr_sk" = "customer_address"."ca_address_sk" JOIN "date_dim" AS "date_dim" ON "date_dim"."d_date" >= '2000-4-01' - AND "web_sales"."ws_ship_date_sk" = "date_dim"."d_date_sk" + AND "ws1"."ws_ship_date_sk" = "date_dim"."d_date_sk" AND CAST("date_dim"."d_date" AS DATE) <= ( CAST('2000-4-01' AS DATE) + INTERVAL '60' day ) -JOIN "customer_address" AS "customer_address" - ON "customer_address"."ca_state" = 'IN' - AND "web_sales"."ws_ship_addr_sk" = "customer_address"."ca_address_sk" JOIN "web_site" AS "web_site" - ON "web_sales"."ws_web_site_sk" = "web_site"."web_site_sk" - AND "web_site"."web_company_name" = 'pri' -LEFT JOIN "_u_0" AS "_u_0" - ON "web_sales"."ws_order_number" = "_u_0"."ws_order_number" -LEFT JOIN "_u_1" AS "_u_1" - ON "web_sales"."ws_order_number" = "_u_1"."wr_order_number" + ON "web_site"."web_company_name" = 'pri' + AND "ws1"."ws_web_site_sk" = "web_site"."web_site_sk" WHERE NOT "_u_0"."ws_order_number" IS NULL AND NOT "_u_1"."wr_order_number" IS NULL ORDER BY - COUNT(DISTINCT "web_sales"."ws_order_number") + COUNT(DISTINCT "ws1"."ws_order_number") LIMIT 100; -------------------------------------- @@ -12362,12 +12666,12 @@ FROM "store_sales" AS "store_sales" JOIN "household_demographics" AS "household_demographics" ON "household_demographics"."hd_dep_count" = 7 AND "store_sales"."ss_hdemo_sk" = "household_demographics"."hd_demo_sk" +JOIN "store" AS "store" + ON "store"."s_store_name" = 'ese' AND "store_sales"."ss_store_sk" = "store"."s_store_sk" JOIN "time_dim" AS "time_dim" ON 
"store_sales"."ss_sold_time_sk" = "time_dim"."t_time_sk" AND "time_dim"."t_hour" = 15 AND "time_dim"."t_minute" >= 30 -JOIN "store" AS "store" - ON "store"."s_store_name" = 'ese' AND "store_sales"."ss_store_sk" = "store"."s_store_sk" ORDER BY COUNT(*) LIMIT 100; @@ -12509,13 +12813,13 @@ SELECT SUM("store_sales"."ss_ext_sales_price") AS "itemrevenue", SUM("store_sales"."ss_ext_sales_price") * 100 / SUM(SUM("store_sales"."ss_ext_sales_price")) OVER (PARTITION BY "item"."i_class") AS "revenueratio" FROM "store_sales" AS "store_sales" -JOIN "item" AS "item" - ON "item"."i_category" IN ('Men', 'Home', 'Electronics') - AND "store_sales"."ss_item_sk" = "item"."i_item_sk" JOIN "date_dim" AS "date_dim" ON "store_sales"."ss_sold_date_sk" = "date_dim"."d_date_sk" AND CAST("date_dim"."d_date" AS DATE) <= CAST('2000-06-17' AS DATE) AND CAST("date_dim"."d_date" AS DATE) >= CAST('2000-05-18' AS DATE) +JOIN "item" AS "item" + ON "item"."i_category" IN ('Men', 'Home', 'Electronics') + AND "store_sales"."ss_item_sk" = "item"."i_item_sk" GROUP BY "item"."i_item_id", "item"."i_item_desc", @@ -12621,16 +12925,16 @@ SELECT END ) AS ">120 days" FROM "catalog_sales" AS "catalog_sales" -JOIN "warehouse" AS "warehouse" - ON "catalog_sales"."cs_warehouse_sk" = "warehouse"."w_warehouse_sk" -JOIN "ship_mode" AS "ship_mode" - ON "catalog_sales"."cs_ship_mode_sk" = "ship_mode"."sm_ship_mode_sk" JOIN "call_center" AS "call_center" ON "catalog_sales"."cs_call_center_sk" = "call_center"."cc_call_center_sk" JOIN "date_dim" AS "date_dim" ON "catalog_sales"."cs_ship_date_sk" = "date_dim"."d_date_sk" AND "date_dim"."d_month_seq" <= 1211 AND "date_dim"."d_month_seq" >= 1200 +JOIN "ship_mode" AS "ship_mode" + ON "catalog_sales"."cs_ship_mode_sk" = "ship_mode"."sm_ship_mode_sk" +JOIN "warehouse" AS "warehouse" + ON "catalog_sales"."cs_warehouse_sk" = "warehouse"."w_warehouse_sk" GROUP BY SUBSTR("warehouse"."w_warehouse_name", 1, 20), "ship_mode"."sm_type", diff --git a/tests/fixtures/optimizer/tpc-h/tpc-h.sql b/tests/fixtures/optimizer/tpc-h/tpc-h.sql index a25e247..942295e 100644 --- a/tests/fixtures/optimizer/tpc-h/tpc-h.sql +++ b/tests/fixtures/optimizer/tpc-h/tpc-h.sql @@ -117,12 +117,12 @@ WITH "region_2" AS ( MIN("partsupp"."ps_supplycost") AS "_col_0", "partsupp"."ps_partkey" AS "_u_1" FROM "partsupp_2" AS "partsupp" - CROSS JOIN "region_2" AS "region" - JOIN "nation" AS "nation" - ON "nation"."n_regionkey" = "region"."r_regionkey" JOIN "supplier" AS "supplier" + ON "supplier"."s_suppkey" = "partsupp"."ps_suppkey" + JOIN "nation" AS "nation" ON "supplier"."s_nationkey" = "nation"."n_nationkey" - AND "supplier"."s_suppkey" = "partsupp"."ps_suppkey" + JOIN "region_2" AS "region" + ON "nation"."n_regionkey" = "region"."r_regionkey" GROUP BY "partsupp"."ps_partkey" ) @@ -137,6 +137,8 @@ SELECT "supplier"."s_comment" AS "s_comment" FROM "part" AS "part" CROSS JOIN "region_2" AS "region" +LEFT JOIN "_u_0" AS "_u_0" + ON "part"."p_partkey" = "_u_0"."_u_1" JOIN "nation" AS "nation" ON "nation"."n_regionkey" = "region"."r_regionkey" JOIN "partsupp_2" AS "partsupp" @@ -144,8 +146,6 @@ JOIN "partsupp_2" AS "partsupp" JOIN "supplier" AS "supplier" ON "supplier"."s_nationkey" = "nation"."n_nationkey" AND "supplier"."s_suppkey" = "partsupp"."ps_suppkey" -LEFT JOIN "_u_0" AS "_u_0" - ON "part"."p_partkey" = "_u_0"."_u_1" WHERE "part"."p_size" = 15 AND "part"."p_type" LIKE '%BRASS' @@ -294,16 +294,15 @@ JOIN "orders" AS "orders" ON "customer"."c_custkey" = "orders"."o_custkey" AND CAST("orders"."o_orderdate" AS DATE) < CAST('1995-01-01' 
AS DATE) AND CAST("orders"."o_orderdate" AS DATE) >= CAST('1994-01-01' AS DATE) -JOIN "region" AS "region" - ON "region"."r_name" = 'ASIA' -JOIN "nation" AS "nation" - ON "nation"."n_regionkey" = "region"."r_regionkey" JOIN "supplier" AS "supplier" ON "customer"."c_nationkey" = "supplier"."s_nationkey" - AND "supplier"."s_nationkey" = "nation"."n_nationkey" JOIN "lineitem" AS "lineitem" ON "lineitem"."l_orderkey" = "orders"."o_orderkey" AND "lineitem"."l_suppkey" = "supplier"."s_suppkey" +JOIN "nation" AS "nation" + ON "supplier"."s_nationkey" = "nation"."n_nationkey" +JOIN "region" AS "region" + ON "nation"."n_regionkey" = "region"."r_regionkey" AND "region"."r_name" = 'ASIA' GROUP BY "nation"."n_name" ORDER BY @@ -373,14 +372,6 @@ order by supp_nation, cust_nation, l_year; -WITH "n1" AS ( - SELECT - "nation"."n_nationkey" AS "n_nationkey", - "nation"."n_name" AS "n_name" - FROM "nation" AS "nation" - WHERE - "nation"."n_name" = 'FRANCE' OR "nation"."n_name" = 'GERMANY' -) SELECT "n1"."n_name" AS "supp_nation", "n2"."n_name" AS "cust_nation", @@ -393,20 +384,26 @@ JOIN "lineitem" AS "lineitem" ON "supplier"."s_suppkey" = "lineitem"."l_suppkey" AND CAST("lineitem"."l_shipdate" AS DATE) <= CAST('1996-12-31' AS DATE) AND CAST("lineitem"."l_shipdate" AS DATE) >= CAST('1995-01-01' AS DATE) -JOIN "orders" AS "orders" - ON "orders"."o_orderkey" = "lineitem"."l_orderkey" -JOIN "customer" AS "customer" - ON "customer"."c_custkey" = "orders"."o_custkey" -JOIN "n1" AS "n1" - ON "supplier"."s_nationkey" = "n1"."n_nationkey" -JOIN "n1" AS "n2" - ON "customer"."c_nationkey" = "n2"."n_nationkey" - AND ( +JOIN "nation" AS "n1" + ON ( + "n1"."n_name" = 'FRANCE' OR "n1"."n_name" = 'GERMANY' + ) + AND "supplier"."s_nationkey" = "n1"."n_nationkey" +JOIN "nation" AS "n2" + ON ( "n1"."n_name" = 'FRANCE' OR "n2"."n_name" = 'FRANCE' ) AND ( "n1"."n_name" = 'GERMANY' OR "n2"."n_name" = 'GERMANY' ) + AND ( + "n2"."n_name" = 'FRANCE' OR "n2"."n_name" = 'GERMANY' + ) +JOIN "customer" AS "customer" + ON "customer"."c_nationkey" = "n2"."n_nationkey" +JOIN "orders" AS "orders" + ON "customer"."c_custkey" = "orders"."o_custkey" + AND "orders"."o_orderkey" = "lineitem"."l_orderkey" GROUP BY "n1"."n_name", "n2"."n_name", @@ -460,7 +457,7 @@ SELECT EXTRACT(year FROM CAST("orders"."o_orderdate" AS DATE)) AS "o_year", SUM( CASE - WHEN "nation_2"."n_name" = 'BRAZIL' + WHEN "n2"."n_name" = 'BRAZIL' THEN "lineitem"."l_extendedprice" * ( 1 - "lineitem"."l_discount" ) @@ -472,21 +469,21 @@ SELECT FROM "part" AS "part" JOIN "region" AS "region" ON "region"."r_name" = 'AMERICA' -JOIN "nation" AS "nation" - ON "nation"."n_regionkey" = "region"."r_regionkey" +JOIN "lineitem" AS "lineitem" + ON "part"."p_partkey" = "lineitem"."l_partkey" +JOIN "nation" AS "n1" + ON "n1"."n_regionkey" = "region"."r_regionkey" JOIN "customer" AS "customer" - ON "customer"."c_nationkey" = "nation"."n_nationkey" + ON "customer"."c_nationkey" = "n1"."n_nationkey" +JOIN "supplier" AS "supplier" + ON "supplier"."s_suppkey" = "lineitem"."l_suppkey" +JOIN "nation" AS "n2" + ON "supplier"."s_nationkey" = "n2"."n_nationkey" JOIN "orders" AS "orders" - ON "orders"."o_custkey" = "customer"."c_custkey" + ON "lineitem"."l_orderkey" = "orders"."o_orderkey" + AND "orders"."o_custkey" = "customer"."c_custkey" AND CAST("orders"."o_orderdate" AS DATE) <= CAST('1996-12-31' AS DATE) AND CAST("orders"."o_orderdate" AS DATE) >= CAST('1995-01-01' AS DATE) -JOIN "lineitem" AS "lineitem" - ON "lineitem"."l_orderkey" = "orders"."o_orderkey" - AND "part"."p_partkey" = 
"lineitem"."l_partkey" -JOIN "supplier" AS "supplier" - ON "supplier"."s_suppkey" = "lineitem"."l_suppkey" -JOIN "nation" AS "nation_2" - ON "supplier"."s_nationkey" = "nation_2"."n_nationkey" WHERE "part"."p_type" = 'ECONOMY ANODIZED STEEL' GROUP BY @@ -540,13 +537,13 @@ SELECT FROM "part" AS "part" JOIN "lineitem" AS "lineitem" ON "part"."p_partkey" = "lineitem"."l_partkey" -JOIN "supplier" AS "supplier" - ON "supplier"."s_suppkey" = "lineitem"."l_suppkey" +JOIN "orders" AS "orders" + ON "orders"."o_orderkey" = "lineitem"."l_orderkey" JOIN "partsupp" AS "partsupp" ON "partsupp"."ps_partkey" = "lineitem"."l_partkey" AND "partsupp"."ps_suppkey" = "lineitem"."l_suppkey" -JOIN "orders" AS "orders" - ON "orders"."o_orderkey" = "lineitem"."l_orderkey" +JOIN "supplier" AS "supplier" + ON "supplier"."s_suppkey" = "lineitem"."l_suppkey" JOIN "nation" AS "nation" ON "supplier"."s_nationkey" = "nation"."n_nationkey" WHERE @@ -606,14 +603,14 @@ SELECT "customer"."c_phone" AS "c_phone", "customer"."c_comment" AS "c_comment" FROM "customer" AS "customer" +JOIN "nation" AS "nation" + ON "customer"."c_nationkey" = "nation"."n_nationkey" JOIN "orders" AS "orders" ON "customer"."c_custkey" = "orders"."o_custkey" AND CAST("orders"."o_orderdate" AS DATE) < CAST('1994-01-01' AS DATE) AND CAST("orders"."o_orderdate" AS DATE) >= CAST('1993-10-01' AS DATE) JOIN "lineitem" AS "lineitem" ON "lineitem"."l_orderkey" = "orders"."o_orderkey" AND "lineitem"."l_returnflag" = 'R' -JOIN "nation" AS "nation" - ON "customer"."c_nationkey" = "nation"."n_nationkey" GROUP BY "customer"."c_custkey", "customer"."c_name", @@ -681,11 +678,11 @@ SELECT "partsupp"."ps_partkey" AS "ps_partkey", SUM("partsupp"."ps_supplycost" * "partsupp"."ps_availqty") AS "value" FROM "partsupp" AS "partsupp" +CROSS JOIN "_u_0" AS "_u_0" JOIN "supplier_2" AS "supplier" ON "partsupp"."ps_suppkey" = "supplier"."s_suppkey" JOIN "nation_2" AS "nation" ON "supplier"."s_nationkey" = "nation"."n_nationkey" -CROSS JOIN "_u_0" AS "_u_0" GROUP BY "partsupp"."ps_partkey" HAVING @@ -950,13 +947,13 @@ SELECT "part"."p_size" AS "p_size", COUNT(DISTINCT "partsupp"."ps_suppkey") AS "supplier_cnt" FROM "partsupp" AS "partsupp" +LEFT JOIN "_u_0" AS "_u_0" + ON "partsupp"."ps_suppkey" = "_u_0"."s_suppkey" JOIN "part" AS "part" ON "part"."p_brand" <> 'Brand#45' AND "part"."p_partkey" = "partsupp"."ps_partkey" AND "part"."p_size" IN (49, 14, 23, 45, 19, 3, 36, 9) AND NOT "part"."p_type" LIKE 'MEDIUM POLISHED%' -LEFT JOIN "_u_0" AS "_u_0" - ON "partsupp"."ps_suppkey" = "_u_0"."s_suppkey" WHERE "_u_0"."s_suppkey" IS NULL GROUP BY @@ -1066,10 +1063,10 @@ SELECT FROM "customer" AS "customer" JOIN "orders" AS "orders" ON "customer"."c_custkey" = "orders"."o_custkey" -JOIN "lineitem" AS "lineitem" - ON "orders"."o_orderkey" = "lineitem"."l_orderkey" LEFT JOIN "_u_0" AS "_u_0" ON "orders"."o_orderkey" = "_u_0"."l_orderkey" +JOIN "lineitem" AS "lineitem" + ON "orders"."o_orderkey" = "lineitem"."l_orderkey" WHERE NOT "_u_0"."l_orderkey" IS NULL GROUP BY @@ -1260,10 +1257,10 @@ SELECT "supplier"."s_name" AS "s_name", "supplier"."s_address" AS "s_address" FROM "supplier" AS "supplier" -JOIN "nation" AS "nation" - ON "nation"."n_name" = 'CANADA' AND "supplier"."s_nationkey" = "nation"."n_nationkey" LEFT JOIN "_u_4" AS "_u_4" ON "supplier"."s_suppkey" = "_u_4"."ps_suppkey" +JOIN "nation" AS "nation" + ON "nation"."n_name" = 'CANADA' AND "supplier"."s_nationkey" = "nation"."n_nationkey" WHERE NOT "_u_4"."ps_suppkey" IS NULL ORDER BY @@ -1334,24 +1331,24 @@ SELECT "supplier"."s_name" 
AS "s_name", COUNT(*) AS "numwait" FROM "supplier" AS "supplier" -JOIN "lineitem" AS "lineitem" - ON "lineitem"."l_receiptdate" > "lineitem"."l_commitdate" - AND "supplier"."s_suppkey" = "lineitem"."l_suppkey" -JOIN "orders" AS "orders" - ON "orders"."o_orderkey" = "lineitem"."l_orderkey" AND "orders"."o_orderstatus" = 'F' +JOIN "lineitem" AS "l1" + ON "l1"."l_receiptdate" > "l1"."l_commitdate" + AND "supplier"."s_suppkey" = "l1"."l_suppkey" JOIN "nation" AS "nation" ON "nation"."n_name" = 'SAUDI ARABIA' AND "supplier"."s_nationkey" = "nation"."n_nationkey" LEFT JOIN "_u_0" AS "_u_0" - ON "_u_0"."l_orderkey" = "lineitem"."l_orderkey" + ON "_u_0"."l_orderkey" = "l1"."l_orderkey" LEFT JOIN "_u_2" AS "_u_2" - ON "_u_2"."l_orderkey" = "lineitem"."l_orderkey" + ON "_u_2"."l_orderkey" = "l1"."l_orderkey" +JOIN "orders" AS "orders" + ON "orders"."o_orderkey" = "l1"."l_orderkey" AND "orders"."o_orderstatus" = 'F' WHERE ( "_u_2"."l_orderkey" IS NULL - OR NOT ARRAY_ANY("_u_2"."_u_3", "_x" -> "_x" <> "lineitem"."l_suppkey") + OR NOT ARRAY_ANY("_u_2"."_u_3", "_x" -> "_x" <> "l1"."l_suppkey") ) - AND ARRAY_ANY("_u_0"."_u_1", "_x" -> "_x" <> "lineitem"."l_suppkey") + AND ARRAY_ANY("_u_0"."_u_1", "_x" -> "_x" <> "l1"."l_suppkey") AND NOT "_u_0"."l_orderkey" IS NULL GROUP BY "supplier"."s_name" @@ -1430,3 +1427,4 @@ GROUP BY SUBSTRING("customer"."c_phone", 1, 2) ORDER BY "cntrycode"; + diff --git a/tests/test_build.py b/tests/test_build.py index 1e28689..f354640 100644 --- a/tests/test_build.py +++ b/tests/test_build.py @@ -127,6 +127,16 @@ class TestBuild(unittest.TestCase): "SELECT x FROM tbl WHERE x > 0 FOR SHARE", "postgres", ), + ( + lambda: select("x").from_("tbl").hint("repartition(100)"), + "SELECT /*+ REPARTITION(100) */ x FROM tbl", + "spark", + ), + ( + lambda: select("x").from_("tbl").hint("coalesce(3)", "broadcast(x)"), + "SELECT /*+ COALESCE(3), BROADCAST(x) */ x FROM tbl", + "spark", + ), ( lambda: select("x", "y").from_("tbl").group_by("x"), "SELECT x, y FROM tbl GROUP BY x", diff --git a/tests/test_executor.py b/tests/test_executor.py index a121dea..bb01dee 100644 --- a/tests/test_executor.py +++ b/tests/test_executor.py @@ -1,6 +1,7 @@ import datetime import unittest from datetime import date +from multiprocessing import Pool import duckdb import pandas as pd @@ -76,13 +77,21 @@ class TestExecutor(unittest.TestCase): ) return expression - for i, (sql, _) in enumerate(self.sqls): - with self.subTest(f"tpch-h {i + 1}"): - a = self.cached_execute(sql) - sql = parse_one(sql).transform(to_csv).sql(pretty=True) - table = execute(sql, TPCH_SCHEMA) - b = pd.DataFrame(table.rows, columns=table.columns) - assert_frame_equal(a, b, check_dtype=False, check_index_type=False) + with Pool() as pool: + for i, table in enumerate( + pool.starmap( + execute, + ( + (parse_one(sql).transform(to_csv).sql(pretty=True), TPCH_SCHEMA) + for sql, _ in self.sqls + ), + ) + ): + with self.subTest(f"tpch-h {i + 1}"): + sql, _ = self.sqls[i] + a = self.cached_execute(sql) + b = pd.DataFrame(table.rows, columns=table.columns) + assert_frame_equal(a, b, check_dtype=False, check_index_type=False) def test_execute_callable(self): tables = { @@ -496,6 +505,7 @@ class TestExecutor(unittest.TestCase): ("SELECT 1", ["1"], [(1,)]), ("SELECT 1 + 2 AS x", ["x"], [(3,)]), ("SELECT CONCAT('a', 'b') AS x", ["x"], [("ab",)]), + ("SELECT CONCAT('a', 1) AS x", ["x"], [("a1",)]), ("SELECT 1 AS x, 2 AS y", ["x", "y"], [(1, 2)]), ("SELECT 'foo' LIMIT 1", ["foo"], [("foo",)]), ( diff --git a/tests/test_expressions.py 
b/tests/test_expressions.py index 7735e78..c9b5279 100644 --- a/tests/test_expressions.py +++ b/tests/test_expressions.py @@ -534,6 +534,7 @@ class TestExpressions(unittest.TestCase): self.assertIsInstance(parse_one("HLL(a)"), exp.Hll) self.assertIsInstance(parse_one("ARRAY(time, foo)"), exp.Array) self.assertIsInstance(parse_one("STANDARD_HASH('hello', 'sha256')"), exp.StandardHash) + self.assertIsInstance(parse_one("DATE(foo)"), exp.Date) def test_column(self): column = parse_one("a.b.c.d") @@ -590,7 +591,7 @@ class TestExpressions(unittest.TestCase): unit = parse_one("timestamp_trunc(current_timestamp, week(thursday))") self.assertIsNotNone(unit.find(exp.CurrentTimestamp)) week = unit.find(exp.Week) - self.assertEqual(week.this, exp.Var(this="thursday")) + self.assertEqual(week.this, exp.var("thursday")) def test_identifier(self): self.assertTrue(exp.to_identifier('"x"').quoted) @@ -601,7 +602,7 @@ class TestExpressions(unittest.TestCase): def test_function_normalizer(self): self.assertEqual(parse_one("HELLO()").sql(normalize_functions="lower"), "hello()") self.assertEqual(parse_one("hello()").sql(normalize_functions="upper"), "HELLO()") - self.assertEqual(parse_one("heLLO()").sql(normalize_functions=None), "heLLO()") + self.assertEqual(parse_one("heLLO()").sql(normalize_functions=False), "heLLO()") self.assertEqual(parse_one("SUM(x)").sql(normalize_functions="lower"), "sum(x)") self.assertEqual(parse_one("sum(x)").sql(normalize_functions="upper"), "SUM(x)") @@ -786,7 +787,7 @@ FROM foo""", self.assertEqual(exp.DataType.build("DECIMAL").sql(), "DECIMAL") self.assertEqual(exp.DataType.build("BOOLEAN").sql(), "BOOLEAN") self.assertEqual(exp.DataType.build("JSON").sql(), "JSON") - self.assertEqual(exp.DataType.build("JSONB").sql(), "JSONB") + self.assertEqual(exp.DataType.build("JSONB", dialect="postgres").sql(), "JSONB") self.assertEqual(exp.DataType.build("INTERVAL").sql(), "INTERVAL") self.assertEqual(exp.DataType.build("TIME").sql(), "TIME") self.assertEqual(exp.DataType.build("TIMESTAMP").sql(), "TIMESTAMP") @@ -801,22 +802,17 @@ FROM foo""", self.assertEqual(exp.DataType.build("GEOMETRY").sql(), "GEOMETRY") self.assertEqual(exp.DataType.build("STRUCT").sql(), "STRUCT") self.assertEqual(exp.DataType.build("NULLABLE").sql(), "NULLABLE") - self.assertEqual(exp.DataType.build("HLLSKETCH").sql(), "HLLSKETCH") - self.assertEqual(exp.DataType.build("HSTORE").sql(), "HSTORE") - self.assertEqual(exp.DataType.build("SUPER").sql(), "SUPER") - self.assertEqual(exp.DataType.build("SERIAL").sql(), "SERIAL") - self.assertEqual(exp.DataType.build("SMALLSERIAL").sql(), "SMALLSERIAL") - self.assertEqual(exp.DataType.build("BIGSERIAL").sql(), "BIGSERIAL") - self.assertEqual(exp.DataType.build("XML").sql(), "XML") - self.assertEqual(exp.DataType.build("UNIQUEIDENTIFIER").sql(), "UNIQUEIDENTIFIER") - self.assertEqual(exp.DataType.build("MONEY").sql(), "MONEY") - self.assertEqual(exp.DataType.build("SMALLMONEY").sql(), "SMALLMONEY") - self.assertEqual(exp.DataType.build("ROWVERSION").sql(), "ROWVERSION") - self.assertEqual(exp.DataType.build("IMAGE").sql(), "IMAGE") - self.assertEqual(exp.DataType.build("VARIANT").sql(), "VARIANT") - self.assertEqual(exp.DataType.build("OBJECT").sql(), "OBJECT") + self.assertEqual(exp.DataType.build("HLLSKETCH", dialect="redshift").sql(), "HLLSKETCH") + self.assertEqual(exp.DataType.build("HSTORE", dialect="postgres").sql(), "HSTORE") self.assertEqual(exp.DataType.build("NULL").sql(), "NULL") + self.assertEqual(exp.DataType.build("NULL", dialect="bigquery").sql(), "NULL") 
self.assertEqual(exp.DataType.build("UNKNOWN").sql(), "UNKNOWN") + self.assertEqual(exp.DataType.build("UNKNOWN", dialect="bigquery").sql(), "UNKNOWN") + self.assertEqual(exp.DataType.build("UNKNOWN", dialect="snowflake").sql(), "UNKNOWN") + self.assertEqual(exp.DataType.build("TIMESTAMP", dialect="bigquery").sql(), "TIMESTAMPTZ") + self.assertEqual( + exp.DataType.build("struct", dialect="spark").sql(), "STRUCT" + ) def test_rename_table(self): self.assertEqual( diff --git a/tests/test_helper.py b/tests/test_helper.py index 82d917e..7d63c34 100644 --- a/tests/test_helper.py +++ b/tests/test_helper.py @@ -6,17 +6,16 @@ from sqlglot.helper import name_sequence, tsort class TestHelper(unittest.TestCase): def test_tsort(self): - self.assertEqual(tsort({"a": []}), ["a"]) - self.assertEqual(tsort({"a": ["b", "b"]}), ["b", "a"]) - self.assertEqual(tsort({"a": ["b"]}), ["b", "a"]) - self.assertEqual(tsort({"a": ["c"], "b": [], "c": []}), ["c", "a", "b"]) + self.assertEqual(tsort({"a": set()}), ["a"]) + self.assertEqual(tsort({"a": {"b"}}), ["b", "a"]) + self.assertEqual(tsort({"a": {"c"}, "b": set(), "c": set()}), ["b", "c", "a"]) self.assertEqual( tsort( { - "a": ["b", "c"], - "b": ["c"], - "c": [], - "d": ["a"], + "a": {"b", "c"}, + "b": {"c"}, + "c": set(), + "d": {"a"}, } ), ["c", "b", "a", "d"], @@ -25,9 +24,9 @@ class TestHelper(unittest.TestCase): with self.assertRaises(ValueError): tsort( { - "a": ["b", "c"], - "b": ["a"], - "c": [], + "a": {"b", "c"}, + "b": {"a"}, + "c": set(), } ) diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index 2ae6da9..94bd0ba 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -198,6 +198,15 @@ class TestOptimizer(unittest.TestCase): self.check_file("normalize", normalize) def test_qualify_columns(self): + self.assertEqual( + optimizer.qualify_columns.qualify_columns( + parse_one("WITH x AS (SELECT a FROM db.y) SELECT z FROM db.x"), + schema={"db": {"x": {"z": "int"}, "y": {"a": "int"}}}, + infer_schema=False, + ).sql(), + "WITH x AS (SELECT y.a AS a FROM db.y) SELECT x.z AS z FROM db.x", + ) + self.assertEqual( optimizer.qualify_columns.qualify_columns( parse_one("select y from x"), @@ -544,9 +553,10 @@ FROM READ_CSV('tests/fixtures/optimizer/tpc-h/nation.csv.gz', 'delimiter', '|') def test_function_annotation(self): schema = {"x": {"cola": "VARCHAR", "colb": "CHAR"}} - sql = "SELECT x.cola || TRIM(x.colb) AS col FROM x AS x" + sql = "SELECT x.cola || TRIM(x.colb) AS col, DATE(x.colb) FROM x AS x" - concat_expr_alias = annotate_types(parse_one(sql), schema=schema).expressions[0] + expression = annotate_types(parse_one(sql), schema=schema) + concat_expr_alias = expression.expressions[0] self.assertEqual(concat_expr_alias.type.this, exp.DataType.Type.VARCHAR) concat_expr = concat_expr_alias.this @@ -555,6 +565,9 @@ FROM READ_CSV('tests/fixtures/optimizer/tpc-h/nation.csv.gz', 'delimiter', '|') self.assertEqual(concat_expr.right.type.this, exp.DataType.Type.VARCHAR) # TRIM(x.colb) self.assertEqual(concat_expr.right.this.type.this, exp.DataType.Type.CHAR) # x.colb + date_expr = expression.expressions[1] + self.assertEqual(date_expr.type.this, exp.DataType.Type.DATE) + sql = "SELECT CASE WHEN 1=1 THEN x.cola ELSE x.colb END AS col FROM x AS x" case_expr_alias = annotate_types(parse_one(sql), schema=schema).expressions[0] diff --git a/tests/test_parser.py b/tests/test_parser.py index 897357f..96192cd 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -81,6 +81,11 @@ class TestParser(unittest.TestCase): def 
     def test_float(self):
         self.assertEqual(parse_one(".2"), parse_one("0.2"))
 
+    def test_unnest_projection(self):
+        expr = parse_one("SELECT foo IN UNNEST(bla) AS bar")
+        self.assertIsInstance(expr.selects[0], exp.Alias)
+        self.assertEqual(expr.selects[0].output_name, "bar")
+
     def test_unary_plus(self):
         self.assertEqual(parse_one("+15"), exp.Literal.number(15))
 
diff --git a/tests/test_tokens.py b/tests/test_tokens.py
index 30af34f..d5a2b7f 100644
--- a/tests/test_tokens.py
+++ b/tests/test_tokens.py
@@ -1,5 +1,6 @@
 import unittest
 
+from sqlglot.dialects import BigQuery
 from sqlglot.tokens import Tokenizer, TokenType
 
 
@@ -68,7 +69,8 @@ x"""
             Tokenizer().tokenize("select /*")
 
     def test_jinja(self):
-        tokenizer = Tokenizer()
+        # Check that {#, #} are treated as token delimiters, even though BigQuery overrides COMMENTS
+        tokenizer = BigQuery.Tokenizer()
 
         tokens = tokenizer.tokenize(
             """
diff --git a/tests/test_transpile.py b/tests/test_transpile.py
index 1085b09..8d762d3 100644
--- a/tests/test_transpile.py
+++ b/tests/test_transpile.py
@@ -280,6 +280,11 @@ FROM v""",
             "select * from t where ((condition = 1)/*test*/)",
             "SELECT * FROM t WHERE ((condition = 1) /* test */)",
         )
+        self.validate(
+            "SELECT 1 // hi this is a comment",
+            "SELECT 1 /* hi this is a comment */",
+            read="snowflake",
+        )
 
     def test_types(self):
         self.validate("INT 1", "CAST(1 AS INT)")
diff --git a/tests/tpch.py b/tests/tpch.py
index 0b6de63..ef2b666 100644
--- a/tests/tpch.py
+++ b/tests/tpch.py
@@ -2,11 +2,89 @@ import time
 
 from sqlglot.optimizer import optimize
 
-INPUT = ""
-OUTPUT = ""
-NUM = 99
-SCHEMA = {}
-KIND = "DS"
+INPUT = "/home/toby/dev/tpch/{i}.sql"
+OUTPUT = "/home/toby/dev/sqlglot/tests/fixtures/optimizer/tpc-h/tpc-h.sql"
+NUM = 22
+SCHEMA = {
+    "lineitem": {
+        "l_orderkey": "bigint",
+        "l_partkey": "bigint",
+        "l_suppkey": "bigint",
+        "l_linenumber": "bigint",
+        "l_quantity": "double",
+        "l_extendedprice": "double",
+        "l_discount": "double",
+        "l_tax": "double",
+        "l_returnflag": "string",
+        "l_linestatus": "string",
+        "l_shipdate": "string",
+        "l_commitdate": "string",
+        "l_receiptdate": "string",
+        "l_shipinstruct": "string",
+        "l_shipmode": "string",
+        "l_comment": "string",
+    },
+    "orders": {
+        "o_orderkey": "bigint",
+        "o_custkey": "bigint",
+        "o_orderstatus": "string",
+        "o_totalprice": "double",
+        "o_orderdate": "string",
+        "o_orderpriority": "string",
+        "o_clerk": "string",
+        "o_shippriority": "int",
+        "o_comment": "string",
+    },
+    "customer": {
+        "c_custkey": "bigint",
+        "c_name": "string",
+        "c_address": "string",
+        "c_nationkey": "bigint",
+        "c_phone": "string",
+        "c_acctbal": "double",
+        "c_mktsegment": "string",
+        "c_comment": "string",
+    },
+    "part": {
+        "p_partkey": "bigint",
+        "p_name": "string",
+        "p_mfgr": "string",
+        "p_brand": "string",
+        "p_type": "string",
+        "p_size": "int",
+        "p_container": "string",
+        "p_retailprice": "double",
+        "p_comment": "string",
+    },
+    "supplier": {
+        "s_suppkey": "bigint",
+        "s_name": "string",
+        "s_address": "string",
+        "s_nationkey": "bigint",
+        "s_phone": "string",
+        "s_acctbal": "double",
+        "s_comment": "string",
+    },
+    "partsupp": {
+        "ps_partkey": "bigint",
+        "ps_suppkey": "bigint",
+        "ps_availqty": "int",
+        "ps_supplycost": "double",
+        "ps_comment": "string",
+    },
+    "nation": {
+        "n_nationkey": "bigint",
+        "n_name": "string",
+        "n_regionkey": "bigint",
+        "n_comment": "string",
+    },
+    "region": {
+        "r_regionkey": "bigint",
+        "r_name": "string",
+        "r_comment": "string",
+    },
+}
+KIND = "H"
 
 with open(OUTPUT, "w", encoding="UTF-8") as fixture:
     for i in range(NUM):
@@ -17,7 +95,7 @@ with open(OUTPUT, "w", encoding="UTF-8") as fixture:
                 for line in file.read().split(";")[0].split("\n")
                 if not line.startswith("--")
             )
-            original = original.replace("`", '"')
+            original = original.replace("`", '"').strip()
             now = time.time()
             try:
                 optimized = optimize(original, schema=SCHEMA)
-- 
cgit v1.2.3